// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

static const struct hlist_head *all_lists[] = {
        &clk_root_list,
        &clk_orphan_list,
        NULL,
};

/*** private data structures ***/

struct clk_parent_map {
        const struct clk_hw *hw;
        struct clk_core *core;
        const char *fw_name;
        const char *name;
        int index;
};

struct clk_core {
        const char *name;
        const struct clk_ops *ops;
        struct clk_hw *hw;
        struct module *owner;
        struct device *dev;
        struct device_node *of_node;
        struct clk_core *parent;
        struct clk_parent_map *parents;
        u8 num_parents;
        u8 new_parent_index;
        unsigned long rate;
        unsigned long req_rate;
        unsigned long new_rate;
        struct clk_core *new_parent;
        struct clk_core *new_child;
        unsigned long flags;
        bool orphan;
        bool rpm_enabled;
        unsigned int enable_count;
        unsigned int prepare_count;
        unsigned int protect_count;
        unsigned long min_rate;
        unsigned long max_rate;
        unsigned long accuracy;
        int phase;
        struct clk_duty duty;
        struct hlist_head children;
        struct hlist_node child_node;
        struct hlist_head clks;
        unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
        struct dentry *dentry;
        struct hlist_node debug_node;
#endif
        struct kref ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
        struct clk_core *core;
        struct device *dev;
        const char *dev_id;
        const char *con_id;
        unsigned long min_rate;
        unsigned long max_rate;
        unsigned int exclusive_count;
        struct hlist_node clks_node;
};

/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
        if (!core->rpm_enabled)
                return 0;

        return pm_runtime_resume_and_get(core->dev);
}

static void clk_pm_runtime_put(struct clk_core *core)
{
        if (!core->rpm_enabled)
                return;

        pm_runtime_put_sync(core->dev);
}

/*** locking ***/
static void clk_prepare_lock(void)
{
        if (!mutex_trylock(&prepare_lock)) {
                if (prepare_owner == current) {
                        prepare_refcnt++;
                        return;
                }
                mutex_lock(&prepare_lock);
        }
        WARN_ON_ONCE(prepare_owner != NULL);
        WARN_ON_ONCE(prepare_refcnt != 0);
        prepare_owner = current;
        prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
        WARN_ON_ONCE(prepare_owner != current);
        WARN_ON_ONCE(prepare_refcnt == 0);

        if (--prepare_refcnt)
                return;
        prepare_owner = NULL;
        mutex_unlock(&prepare_lock);
}
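/*
 * The trylock/owner/refcount pattern above makes both locks reentrant,
 * but only for the task that already owns them. A minimal illustration
 * (hypothetical call sequence, not taken from this file):
 *
 *        clk_prepare_lock();     // takes prepare_lock, prepare_refcnt = 1
 *        clk_prepare_lock();     // same task: no deadlock, prepare_refcnt = 2
 *        ...
 *        clk_prepare_unlock();   // prepare_refcnt = 1, mutex still held
 *        clk_prepare_unlock();   // prepare_refcnt = 0, mutex released
 *
 * This matters because provider callbacks may call back into the
 * framework (e.g. a .set_rate op grabbing another clk) while one of the
 * locks is already held by the same task.
 */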
static unsigned long clk_enable_lock(void)
        __acquires(enable_lock)
{
        unsigned long flags;

        /*
         * On UP systems, spin_trylock_irqsave() always returns true, even if
         * we already hold the lock. So, in that case, we rely only on
         * reference counting.
         */
        if (!IS_ENABLED(CONFIG_SMP) ||
            !spin_trylock_irqsave(&enable_lock, flags)) {
                if (enable_owner == current) {
                        enable_refcnt++;
                        __acquire(enable_lock);
                        if (!IS_ENABLED(CONFIG_SMP))
                                local_save_flags(flags);
                        return flags;
                }
                spin_lock_irqsave(&enable_lock, flags);
        }
        WARN_ON_ONCE(enable_owner != NULL);
        WARN_ON_ONCE(enable_refcnt != 0);
        enable_owner = current;
        enable_refcnt = 1;
        return flags;
}

static void clk_enable_unlock(unsigned long flags)
        __releases(enable_lock)
{
        WARN_ON_ONCE(enable_owner != current);
        WARN_ON_ONCE(enable_refcnt == 0);

        if (--enable_refcnt) {
                __release(enable_lock);
                return;
        }
        enable_owner = NULL;
        spin_unlock_irqrestore(&enable_lock, flags);
}

static bool clk_core_rate_is_protected(struct clk_core *core)
{
        return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
        bool ret = false;

        /*
         * .is_prepared is optional for clocks that can prepare;
         * fall back to the software usage counter if it is missing.
         */
        if (!core->ops->is_prepared)
                return core->prepare_count;

        if (!clk_pm_runtime_get(core)) {
                ret = core->ops->is_prepared(core->hw);
                clk_pm_runtime_put(core);
        }

        return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
        bool ret = false;

        /*
         * .is_enabled is only mandatory for clocks that gate;
         * fall back to the software usage counter if .is_enabled is missing.
         */
        if (!core->ops->is_enabled)
                return core->enable_count;

        /*
         * Check if clock controller's device is runtime active before
         * calling .is_enabled callback. If not, assume that clock is
         * disabled, because we might be called from atomic context, from
         * which pm_runtime_get() is not allowed.
         * This function is called mainly from clk_disable_unused_subtree,
         * which ensures proper runtime pm activation of controller before
         * taking enable spinlock, but the below check is needed if one tries
         * to call it from other places.
         */
        if (core->rpm_enabled) {
                pm_runtime_get_noresume(core->dev);
                if (!pm_runtime_active(core->dev)) {
                        ret = false;
                        goto done;
                }
        }

        /*
         * This could be called with the enable lock held, or from atomic
         * context. If the parent isn't enabled already, we can't do
         * anything here. We can also assume this clock isn't enabled.
         */
        if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
                if (!clk_core_is_enabled(core->parent)) {
                        ret = false;
                        goto done;
                }

        ret = core->ops->is_enabled(core->hw);
done:
        if (core->rpm_enabled)
                pm_runtime_put(core->dev);

        return ret;
}
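/*
 * As the comments above note, .is_enabled is mandatory for gate clocks
 * and must not sleep, since clk_core_is_enabled() may run in atomic
 * context (which is also why runtime PM state is only checked with
 * pm_runtime_get_noresume() above). A minimal sketch of such a callback
 * for a hypothetical memory-mapped gate (struct foo_gate, its reg field
 * and FOO_GATE_ENABLE_BIT are illustrative, not part of this file):
 *
 *        static int foo_gate_is_enabled(struct clk_hw *hw)
 *        {
 *                struct foo_gate *gate = container_of(hw, struct foo_gate, hw);
 *
 *                return !!(readl(gate->reg) & FOO_GATE_ENABLE_BIT);
 *        }
 */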
/*** helper functions ***/

const char *__clk_get_name(const struct clk *clk)
{
        return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
        return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
        return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
        return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
        return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
                                             struct clk_core *core)
{
        struct clk_core *child;
        struct clk_core *ret;

        if (!strcmp(core->name, name))
                return core;

        hlist_for_each_entry(child, &core->children, child_node) {
                ret = __clk_lookup_subtree(name, child);
                if (ret)
                        return ret;
        }

        return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
        struct clk_core *root_clk;
        struct clk_core *ret;

        if (!name)
                return NULL;

        /* search the 'proper' clk tree first */
        hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        /* if not found, then search the orphan tree */
        hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        return NULL;
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
                            const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
                                   const char *name,
                                   struct of_phandle_args *out_args)
{
        return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
        return ERR_PTR(-ENOENT);
}
#endif

/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * For example the following DT snippet would allow a clock registered by the
 * clock-controller@c001 that has a clk_init_data::parent_data array
 * with 'xtal' in the 'fw_name' member to find the clock provided by the
 * clock-controller@f00abcd without needing to get the globally unique name of
 * the xtal clk.
 *
 *        parent: clock-controller@f00abcd {
 *                reg = <0xf00abcd 0xabcd>;
 *                #clock-cells = <0>;
 *        };
 *
 *        clock-controller@c001 {
 *                reg = <0xc001 0xf00d>;
 *                clocks = <&parent>;
 *                clock-names = "xtal";
 *                #clock-cells = <1>;
 *        };
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
 * A valid clk_core pointer when the clk can be found in the provider.
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
        const char *name = core->parents[p_index].fw_name;
        int index = core->parents[p_index].index;
        struct clk_hw *hw = ERR_PTR(-ENOENT);
        struct device *dev = core->dev;
        const char *dev_id = dev ? dev_name(dev) : NULL;
        struct device_node *np = core->of_node;
        struct of_phandle_args clkspec;

        if (np && (name || index >= 0) &&
            !of_parse_clkspec(np, index, name, &clkspec)) {
                hw = of_clk_get_hw_from_clkspec(&clkspec);
                of_node_put(clkspec.np);
        } else if (name) {
                /*
                 * If the DT search above couldn't find the provider fallback to
                 * looking up via clkdev based clk_lookups.
                 */
                hw = clk_find_hw(dev_id, name);
        }

        if (IS_ERR(hw))
                return ERR_CAST(hw);

        if (!hw)
                return NULL;

        return hw->core;
}
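/*
 * To connect the kernel-doc example above to provider code: the
 * clock-controller@c001 driver could describe its external parent with
 * clk_init_data roughly as follows (a sketch; the foo_* names are
 * illustrative):
 *
 *        static const struct clk_parent_data foo_parents[] = {
 *                { .fw_name = "xtal", .index = 0 },
 *        };
 *
 *        static const struct clk_init_data foo_init = {
 *                .name = "foo",
 *                .ops = &foo_ops,
 *                .parent_data = foo_parents,
 *                .num_parents = ARRAY_SIZE(foo_parents),
 *        };
 *
 * clk_core_get() then resolves "xtal" against the node's 'clock-names'
 * property, falling back to a clkdev lookup keyed on dev_name().
 */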
static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
        struct clk_parent_map *entry = &core->parents[index];
        struct clk_core *parent;

        if (entry->hw) {
                parent = entry->hw->core;
        } else {
                parent = clk_core_get(core, index);
                if (PTR_ERR(parent) == -ENOENT && entry->name)
                        parent = clk_core_lookup(entry->name);
        }

        /*
         * We have a direct reference but it isn't registered yet?
         * Orphan it and let clk_reparent() update the orphan status
         * when the parent is registered.
         */
        if (!parent)
                parent = ERR_PTR(-EPROBE_DEFER);

        /* Only cache it if it's not an error */
        if (!IS_ERR(parent))
                entry->core = parent;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
                                                     u8 index)
{
        if (!core || index >= core->num_parents || !core->parents)
                return NULL;

        if (!core->parents[index].core)
                clk_core_fill_parent_index(core, index);

        return core->parents[index].core;
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
        struct clk_core *parent;

        parent = clk_core_get_parent_by_index(hw->core, index);

        return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
        return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
        if (!core)
                return 0;

        if (!core->num_parents || core->parent)
                return core->rate;

        /*
         * Clk must have a parent because num_parents > 0 but the parent isn't
         * known yet. Best to return 0 as the rate of this clk until we can
         * properly recalc the rate based on the parent's rate.
         */
        return 0;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
        return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);
static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
        if (!core)
                return 0;

        return core->accuracy;
}

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
        return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
        return clk_core_is_prepared(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
        return clk_core_rate_is_protected(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
        return clk_core_is_enabled(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);

bool __clk_is_enabled(struct clk *clk)
{
        if (!clk)
                return false;

        return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
                               unsigned long best, unsigned long flags)
{
        if (flags & CLK_MUX_ROUND_CLOSEST)
                return abs(now - rate) < abs(best - rate);

        return now <= rate && now > best;
}

static void clk_core_init_rate_req(struct clk_core * const core,
                                   struct clk_rate_request *req,
                                   unsigned long rate);

static int clk_core_round_rate_nolock(struct clk_core *core,
                                      struct clk_rate_request *req);

static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent)
{
        struct clk_core *tmp;
        unsigned int i;

        /* Optimize for the case where the requested parent is already the parent. */
        if (core->parent == parent)
                return true;

        for (i = 0; i < core->num_parents; i++) {
                tmp = clk_core_get_parent_by_index(core, i);
                if (!tmp)
                        continue;

                if (tmp == parent)
                        return true;
        }

        return false;
}
static void
clk_core_forward_rate_req(struct clk_core *core,
                          const struct clk_rate_request *old_req,
                          struct clk_core *parent,
                          struct clk_rate_request *req,
                          unsigned long parent_rate)
{
        if (WARN_ON(!clk_core_has_parent(core, parent)))
                return;

        clk_core_init_rate_req(parent, req, parent_rate);

        if (req->min_rate < old_req->min_rate)
                req->min_rate = old_req->min_rate;

        if (req->max_rate > old_req->max_rate)
                req->max_rate = old_req->max_rate;
}

static int
clk_core_determine_rate_no_reparent(struct clk_hw *hw,
                                    struct clk_rate_request *req)
{
        struct clk_core *core = hw->core;
        struct clk_core *parent = core->parent;
        unsigned long best;
        int ret;

        if (core->flags & CLK_SET_RATE_PARENT) {
                struct clk_rate_request parent_req;

                if (!parent) {
                        req->rate = 0;
                        return 0;
                }

                clk_core_forward_rate_req(core, req, parent, &parent_req,
                                          req->rate);

                trace_clk_rate_request_start(&parent_req);

                ret = clk_core_round_rate_nolock(parent, &parent_req);
                if (ret)
                        return ret;

                trace_clk_rate_request_done(&parent_req);

                best = parent_req.rate;
        } else if (parent) {
                best = clk_core_get_rate_nolock(parent);
        } else {
                best = clk_core_get_rate_nolock(core);
        }

        req->best_parent_rate = best;
        req->rate = best;

        return 0;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
                                 struct clk_rate_request *req,
                                 unsigned long flags)
{
        struct clk_core *core = hw->core, *parent, *best_parent = NULL;
        int i, num_parents, ret;
        unsigned long best = 0;

        /* if NO_REPARENT flag set, pass through to current parent */
        if (core->flags & CLK_SET_RATE_NO_REPARENT)
                return clk_core_determine_rate_no_reparent(hw, req);

        /* find the parent that can provide the fastest rate <= rate */
        num_parents = core->num_parents;
        for (i = 0; i < num_parents; i++) {
                unsigned long parent_rate;

                parent = clk_core_get_parent_by_index(core, i);
                if (!parent)
                        continue;

                if (core->flags & CLK_SET_RATE_PARENT) {
                        struct clk_rate_request parent_req;

                        clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);

                        trace_clk_rate_request_start(&parent_req);

                        ret = clk_core_round_rate_nolock(parent, &parent_req);
                        if (ret)
                                continue;

                        trace_clk_rate_request_done(&parent_req);

                        parent_rate = parent_req.rate;
                } else {
                        parent_rate = clk_core_get_rate_nolock(parent);
                }

                if (mux_is_better_rate(req->rate, parent_rate,
                                       best, flags)) {
                        best_parent = parent;
                        best = parent_rate;
                }
        }

        if (!best_parent)
                return -EINVAL;

        req->best_parent_hw = best_parent->hw;
        req->best_parent_rate = best;
        req->rate = best;

        return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
        struct clk_core *core = clk_core_lookup(name);

        return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
                                    unsigned long *min_rate,
                                    unsigned long *max_rate)
{
        struct clk *clk_user;

        lockdep_assert_held(&prepare_lock);

        *min_rate = core->min_rate;
        *max_rate = core->max_rate;

        hlist_for_each_entry(clk_user, &core->clks, clks_node)
                *min_rate = max(*min_rate, clk_user->min_rate);

        hlist_for_each_entry(clk_user, &core->clks, clks_node)
                *max_rate = min(*max_rate, clk_user->max_rate);
}

/*
 * clk_hw_get_rate_range() - returns the clock rate range for a hw clk
 * @hw: the hw clk we want to get the range from
 * @min_rate: pointer to the variable that will hold the minimum
 * @max_rate: pointer to the variable that will hold the maximum
 *
 * Fills the @min_rate and @max_rate variables with the minimum and
 * maximum that the clock can reach.
 */
void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
                           unsigned long *max_rate)
{
        clk_core_get_boundaries(hw->core, min_rate, max_rate);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate_range);

static bool clk_core_check_boundaries(struct clk_core *core,
                                      unsigned long min_rate,
                                      unsigned long max_rate)
{
        struct clk *user;

        lockdep_assert_held(&prepare_lock);

        if (min_rate > core->max_rate || max_rate < core->min_rate)
                return false;

        hlist_for_each_entry(user, &core->clks, clks_node)
                if (min_rate > user->max_rate || max_rate < user->min_rate)
                        return false;

        return true;
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
                           unsigned long max_rate)
{
        hw->core->min_rate = min_rate;
        hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
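/*
 * A provider would typically publish its hardware limits once at
 * registration time, e.g. (values are illustrative):
 *
 *        clk_hw_set_rate_range(hw, 1000000, 200000000);
 *
 * clk_core_get_boundaries() then folds each consumer's min/max into
 * those hardware bounds, and clk_core_check_boundaries() rejects a
 * range that cannot satisfy every consumer. Consumers narrow their own
 * window with clk_set_rate_range() on their struct clk.
 */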
/*
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 *
 * Returns: 0 on success, -EERROR value on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
                             struct clk_rate_request *req)
{
        return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
                                     struct clk_rate_request *req)
{
        return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/*
 * clk_hw_determine_rate_no_reparent - clk_ops::determine_rate implementation for a clk that doesn't reparent
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred frequency
 *
 * Helper for finding best parent rate to provide a given frequency.
 * This can be used directly as a determine_rate callback (e.g. for a
 * mux), or from a more complex clock that may combine a mux with other
 * operations.
 *
 * Returns: 0 on success, -EERROR value on error
 */
int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
                                      struct clk_rate_request *req)
{
        return clk_core_determine_rate_no_reparent(hw, req);
}
EXPORT_SYMBOL_GPL(clk_hw_determine_rate_no_reparent);
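/*
 * A mux provider can plug these helpers straight into its clk_ops, for
 * example (a sketch; the foo_* callbacks are illustrative):
 *
 *        static const struct clk_ops foo_mux_ops = {
 *                .get_parent = foo_mux_get_parent,
 *                .set_parent = foo_mux_set_parent,
 *                .determine_rate = __clk_mux_determine_rate,
 *        };
 *
 * Pick __clk_mux_determine_rate_closest when the nearest rate in either
 * direction is preferred over the highest rate not above the request,
 * and clk_hw_determine_rate_no_reparent() when the current parent must
 * be kept.
 */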
/*** clk api ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
        lockdep_assert_held(&prepare_lock);

        if (!core)
                return;

        if (WARN(core->protect_count == 0,
            "%s already unprotected\n", core->name))
                return;

        if (--core->protect_count > 0)
                return;

        clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
        int ret;

        lockdep_assert_held(&prepare_lock);

        if (!core)
                return -EINVAL;

        if (core->protect_count == 0)
                return 0;

        ret = core->protect_count;
        core->protect_count = 1;
        clk_core_rate_unprotect(core);

        return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
        if (!clk)
                return;

        clk_prepare_lock();

        /*
         * if there is something wrong with this consumer protect count, stop
         * here before messing with the provider
         */
        if (WARN_ON(clk->exclusive_count <= 0))
                goto out;

        clk_core_rate_unprotect(clk->core);
        clk->exclusive_count--;
out:
        clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
        lockdep_assert_held(&prepare_lock);

        if (!core)
                return;

        if (core->protect_count == 0)
                clk_core_rate_protect(core->parent);

        core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
        lockdep_assert_held(&prepare_lock);

        if (!core)
                return;

        if (count == 0)
                return;

        clk_core_rate_protect(core);
        core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, -EERROR otherwise
 */
int clk_rate_exclusive_get(struct clk *clk)
{
        if (!clk)
                return 0;

        clk_prepare_lock();
        clk_core_rate_protect(clk->core);
        clk->exclusive_count++;
        clk_prepare_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
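/*
 * A consumer that cannot tolerate rate glitches during an operation
 * would typically bracket it like this (sketch, error handling trimmed;
 * the rate is illustrative):
 *
 *        ret = clk_rate_exclusive_get(clk);
 *        if (ret)
 *                return ret;
 *        clk_set_rate(clk, 100000000);  // the holder may still set the rate
 *        ... glitch-sensitive work ...
 *        clk_rate_exclusive_put(clk);
 *
 * While the section is held, clk_set_rate() from any other consumer
 * fails; if exclusivity was claimed more than once, even the holder
 * cannot change the rate until the counts drop back.
 */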
static void clk_core_unprepare(struct clk_core *core)
{
        lockdep_assert_held(&prepare_lock);

        if (!core)
                return;

        if (WARN(core->prepare_count == 0,
            "%s already unprepared\n", core->name))
                return;

        if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
            "Unpreparing critical %s\n", core->name))
                return;

        if (core->flags & CLK_SET_RATE_GATE)
                clk_core_rate_unprotect(core);

        if (--core->prepare_count > 0)
                return;

        WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

        trace_clk_unprepare(core);

        if (core->ops->unprepare)
                core->ops->unprepare(core->hw);

        trace_clk_unprepare_complete(core);
        clk_core_unprepare(core->parent);
        clk_pm_runtime_put(core);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
        clk_prepare_lock();
        clk_core_unprepare(core);
        clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
        if (IS_ERR_OR_NULL(clk))
                return;

        clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
        int ret = 0;

        lockdep_assert_held(&prepare_lock);

        if (!core)
                return 0;

        if (core->prepare_count == 0) {
                ret = clk_pm_runtime_get(core);
                if (ret)
                        return ret;

                ret = clk_core_prepare(core->parent);
                if (ret)
                        goto runtime_put;

                trace_clk_prepare(core);

                if (core->ops->prepare)
                        ret = core->ops->prepare(core->hw);

                trace_clk_prepare_complete(core);

                if (ret)
                        goto unprepare;
        }

        core->prepare_count++;

        /*
         * CLK_SET_RATE_GATE is a special case of clock protection.
         * Instead of a consumer claiming exclusive rate control, it is
         * actually the provider which prevents any consumer from making any
         * operation which could result in a rate change or rate glitch while
         * the clock is prepared.
         */
        if (core->flags & CLK_SET_RATE_GATE)
                clk_core_rate_protect(core);

        return 0;
unprepare:
        clk_core_unprepare(core->parent);
runtime_put:
        clk_pm_runtime_put(core);
        return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
        int ret;

        clk_prepare_lock();
        ret = clk_core_prepare(core);
        clk_prepare_unlock();

        return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
        if (!clk)
                return 0;

        return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);
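/*
 * For a clock controlled over a sleeping bus such as I2C, the provider
 * implements the slow part in .prepare/.unprepare and consumers follow
 * the usual two-step pattern (sketch):
 *
 *        ret = clk_prepare(clk);         // may sleep, e.g. an I2C transfer
 *        if (ret)
 *                return ret;
 *        ret = clk_enable(clk);          // atomic, fast part only
 *        if (ret) {
 *                clk_unprepare(clk);
 *                return ret;
 *        }
 *        ...
 *        clk_disable(clk);
 *        clk_unprepare(clk);
 *
 * clk_prepare_enable()/clk_disable_unprepare() wrap the two calls for
 * callers that can always sleep.
 */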
static void clk_core_disable(struct clk_core *core)
{
        lockdep_assert_held(&enable_lock);

        if (!core)
                return;

        if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
                return;

        if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
            "Disabling critical %s\n", core->name))
                return;

        if (--core->enable_count > 0)
                return;

        trace_clk_disable(core);

        if (core->ops->disable)
                core->ops->disable(core->hw);

        trace_clk_disable_complete(core);

        clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
        unsigned long flags;

        flags = clk_enable_lock();
        clk_core_disable(core);
        clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
        if (IS_ERR_OR_NULL(clk))
                return;

        clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);
static int clk_core_enable(struct clk_core *core)
{
        int ret = 0;

        lockdep_assert_held(&enable_lock);

        if (!core)
                return 0;

        if (WARN(core->prepare_count == 0,
            "Enabling unprepared %s\n", core->name))
                return -ESHUTDOWN;

        if (core->enable_count == 0) {
                ret = clk_core_enable(core->parent);

                if (ret)
                        return ret;

                trace_clk_enable(core);

                if (core->ops->enable)
                        ret = core->ops->enable(core->hw);

                trace_clk_enable_complete(core);

                if (ret) {
                        clk_core_disable(core->parent);
                        return ret;
                }
        }

        core->enable_count++;
        return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
        unsigned long flags;
        int ret;

        flags = clk_enable_lock();
        ret = clk_core_enable(core);
        clk_enable_unlock(flags);

        return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clocks based on the enable_count. This is done in cases
 * where the clock context is lost and, based on the enable_count,
 * the clock either needs to be enabled or disabled. This
 * helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
        struct clk_core *core = hw->core;

        if (core->enable_count)
                core->ops->enable(hw);
        else
                core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);

static int clk_core_save_context(struct clk_core *core)
{
        struct clk_core *child;
        int ret = 0;

        hlist_for_each_entry(child, &core->children, child_node) {
                ret = clk_core_save_context(child);
                if (ret < 0)
                        return ret;
        }

        if (core->ops && core->ops->save_context)
                ret = core->ops->save_context(core->hw);

        return ret;
}

static void clk_core_restore_context(struct clk_core *core)
{
        struct clk_core *child;

        if (core->ops && core->ops->restore_context)
                core->ops->restore_context(core->hw);

        hlist_for_each_entry(child, &core->children, child_node)
                clk_core_restore_context(child);
}

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code. Returns 0 on success.
 */
int clk_save_context(void)
{
        struct clk_core *clk;
        int ret;

        hlist_for_each_entry(clk, &clk_root_list, child_node) {
                ret = clk_core_save_context(clk);
                if (ret < 0)
                        return ret;
        }

        hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
                ret = clk_core_save_context(clk);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 */
void clk_restore_context(void)
{
        struct clk_core *core;

        hlist_for_each_entry(core, &clk_root_list, child_node)
                clk_core_restore_context(core);

        hlist_for_each_entry(core, &clk_orphan_list, child_node)
                clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);
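/*
 * Platform suspend code would pair the two entry points above roughly
 * like this when register contents are lost in deep sleep (sketch; the
 * foo_* hooks are illustrative):
 *
 *        static int foo_suspend(void)
 *        {
 *                return clk_save_context();  // deep in the suspend path
 *        }
 *
 *        static void foo_resume(void)
 *        {
 *                clk_restore_context();
 *        }
 *
 * Providers opt in by implementing .save_context/.restore_context;
 * gate clocks can reuse clk_gate_restore_context() above to re-apply
 * the state recorded in enable_count.
 */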
/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
        if (!clk)
                return 0;

        return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);

/**
 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
 * @clk: clock source
 *
 * Returns true if clk_prepare() implicitly enables the clock, effectively
 * making clk_enable()/clk_disable() no-ops, false otherwise.
 *
 * This is of interest mainly to power management code where actually
 * disabling the clock also requires unpreparing it to have any material
 * effect.
 *
 * Regardless of the value returned here, the caller must always invoke
 * clk_enable() or clk_prepare_enable() and counterparts for usage counts
 * to be right.
 */
bool clk_is_enabled_when_prepared(struct clk *clk)
{
        return clk && !(clk->core->ops->enable && clk->core->ops->disable);
}
EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);
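/*
 * Sketch of the power-management case described above: a caller that
 * wants the clock physically gated must also unprepare it when prepare
 * alone turns it on (illustrative only; counts must be rebalanced with
 * clk_prepare()/clk_enable() on the way back up):
 *
 *        clk_disable(clk);
 *        if (clk_is_enabled_when_prepared(clk))
 *                clk_unprepare(clk);  // only the slow path really gates
 */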
static int clk_core_prepare_enable(struct clk_core *core)
{
        int ret;

        ret = clk_core_prepare_lock(core);
        if (ret)
                return ret;

        ret = clk_core_enable_lock(core);
        if (ret)
                clk_core_unprepare_lock(core);

        return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
        clk_core_disable_lock(core);
        clk_core_unprepare_lock(core);
}

static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
        struct clk_core *child;

        lockdep_assert_held(&prepare_lock);

        hlist_for_each_entry(child, &core->children, child_node)
                clk_unprepare_unused_subtree(child);

        if (core->prepare_count)
                return;

        if (core->flags & CLK_IGNORE_UNUSED)
                return;

        if (clk_pm_runtime_get(core))
                return;

        if (clk_core_is_prepared(core)) {
                trace_clk_unprepare(core);
                if (core->ops->unprepare_unused)
                        core->ops->unprepare_unused(core->hw);
                else if (core->ops->unprepare)
                        core->ops->unprepare(core->hw);
                trace_clk_unprepare_complete(core);
        }

        clk_pm_runtime_put(core);
}

static void __init clk_disable_unused_subtree(struct clk_core *core)
{
        struct clk_core *child;
        unsigned long flags;

        lockdep_assert_held(&prepare_lock);

        hlist_for_each_entry(child, &core->children, child_node)
                clk_disable_unused_subtree(child);

        if (core->flags & CLK_OPS_PARENT_ENABLE)
                clk_core_prepare_enable(core->parent);

        if (clk_pm_runtime_get(core))
                goto unprepare_out;

        flags = clk_enable_lock();

        if (core->enable_count)
                goto unlock_out;

        if (core->flags & CLK_IGNORE_UNUSED)
                goto unlock_out;

        /*
         * Some gate clocks have special needs during the disable-unused
         * sequence. Call .disable_unused if available, otherwise fall
         * back to .disable.
         */
        if (clk_core_is_enabled(core)) {
                trace_clk_disable(core);
                if (core->ops->disable_unused)
                        core->ops->disable_unused(core->hw);
                else if (core->ops->disable)
                        core->ops->disable(core->hw);
                trace_clk_disable_complete(core);
        }

unlock_out:
        clk_enable_unlock(flags);
        clk_pm_runtime_put(core);
unprepare_out:
        if (core->flags & CLK_OPS_PARENT_ENABLE)
                clk_core_disable_unprepare(core->parent);
}
static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
        clk_ignore_unused = true;
        return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int __init clk_disable_unused(void)
{
        struct clk_core *core;

        if (clk_ignore_unused) {
                pr_warn("clk: Not disabling unused clocks\n");
                return 0;
        }

        pr_info("clk: Disabling unused clocks\n");

        clk_prepare_lock();

        hlist_for_each_entry(core, &clk_root_list, child_node)
                clk_disable_unused_subtree(core);

        hlist_for_each_entry(core, &clk_orphan_list, child_node)
                clk_disable_unused_subtree(core);

        hlist_for_each_entry(core, &clk_root_list, child_node)
                clk_unprepare_unused_subtree(core);

        hlist_for_each_entry(core, &clk_orphan_list, child_node)
                clk_unprepare_unused_subtree(core);

        clk_prepare_unlock();

        return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
                                           struct clk_rate_request *req)
{
        long rate;

        lockdep_assert_held(&prepare_lock);

        if (!core)
                return 0;

        /*
         * Some clock providers hand-craft their clk_rate_requests and
         * might not fill min_rate and max_rate.
         *
         * If it's the case, clamping the rate is equivalent to setting
         * the rate to 0 which is bad. Skip the clamping but complain so
         * that it gets fixed, hopefully.
         */
        if (!req->min_rate && !req->max_rate)
                pr_warn("%s: %s: clk_rate_request has uninitialized min or max rate.\n",
                        __func__, core->name);
        else
                req->rate = clamp(req->rate, req->min_rate, req->max_rate);

        /*
         * At this point, core protection will be disabled
         * - if the provider is not protected at all
         * - if the calling consumer is the only one which has exclusivity
         *   over the provider
         */
        if (clk_core_rate_is_protected(core)) {
                req->rate = core->rate;
        } else if (core->ops->determine_rate) {
                return core->ops->determine_rate(core->hw, req);
        } else if (core->ops->round_rate) {
                rate = core->ops->round_rate(core->hw, req->rate,
                                             &req->best_parent_rate);
                if (rate < 0)
                        return rate;

                req->rate = rate;
        } else {
                return -EINVAL;
        }

        return 0;
}
static void clk_core_init_rate_req(struct clk_core * const core,
                                   struct clk_rate_request *req,
                                   unsigned long rate)
{
        struct clk_core *parent;

        if (WARN_ON(!req))
                return;

        memset(req, 0, sizeof(*req));
        req->max_rate = ULONG_MAX;

        if (!core)
                return;

        req->core = core;
        req->rate = rate;
        clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);

        parent = core->parent;
        if (parent) {
                req->best_parent_hw = parent->hw;
                req->best_parent_rate = parent->rate;
        } else {
                req->best_parent_hw = NULL;
                req->best_parent_rate = 0;
        }
}

/**
 * clk_hw_init_rate_request - Initializes a clk_rate_request
 * @hw: the clk for which we want to submit a rate request
 * @req: the clk_rate_request structure we want to initialise
 * @rate: the rate which is to be requested
 *
 * Initializes a clk_rate_request structure to submit to
 * __clk_determine_rate() or similar functions.
 */
void clk_hw_init_rate_request(const struct clk_hw *hw,
                              struct clk_rate_request *req,
                              unsigned long rate)
{
        if (WARN_ON(!hw || !req))
                return;

        clk_core_init_rate_req(hw->core, req, rate);
}
EXPORT_SYMBOL_GPL(clk_hw_init_rate_request);

/**
 * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent
 * @hw: the original clock that got the rate request
 * @old_req: the original clk_rate_request structure we want to forward
 * @parent: the clk we want to forward @old_req to
 * @req: the clk_rate_request structure we want to initialise
 * @parent_rate: The rate which is to be requested to @parent
 *
 * Initializes a clk_rate_request structure to submit to a clock parent
 * in __clk_determine_rate() or similar functions.
 */
void clk_hw_forward_rate_request(const struct clk_hw *hw,
                                 const struct clk_rate_request *old_req,
                                 const struct clk_hw *parent,
                                 struct clk_rate_request *req,
                                 unsigned long parent_rate)
{
        if (WARN_ON(!hw || !old_req || !parent || !req))
                return;

        clk_core_forward_rate_req(hw->core, old_req,
                                  parent->core, req,
                                  parent_rate);
}
EXPORT_SYMBOL_GPL(clk_hw_forward_rate_request);
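/*
 * A provider that derives its rate from its parent can forward the
 * request from its own .determine_rate. Sketch for a hypothetical fixed
 * divide-by-two clock (the foo_* name is illustrative):
 *
 *        static int foo_determine_rate(struct clk_hw *hw,
 *                                      struct clk_rate_request *req)
 *        {
 *                struct clk_rate_request parent_req;
 *                int ret;
 *
 *                clk_hw_forward_rate_request(hw, req, clk_hw_get_parent(hw),
 *                                            &parent_req, req->rate * 2);
 *                ret = __clk_determine_rate(clk_hw_get_parent(hw), &parent_req);
 *                if (ret)
 *                        return ret;
 *
 *                req->best_parent_rate = parent_req.rate;
 *                req->rate = parent_req.rate / 2;
 *                return 0;
 *        }
 */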
static bool clk_core_can_round(struct clk_core * const core)
{
        return core->ops->determine_rate || core->ops->round_rate;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
                                      struct clk_rate_request *req)
{
        int ret;

        lockdep_assert_held(&prepare_lock);

        if (!core) {
                req->rate = 0;
                return 0;
        }

        if (clk_core_can_round(core))
                return clk_core_determine_round_nolock(core, req);

        if (core->flags & CLK_SET_RATE_PARENT) {
                struct clk_rate_request parent_req;

                clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);

                trace_clk_rate_request_start(&parent_req);

                ret = clk_core_round_rate_nolock(core->parent, &parent_req);
                if (ret)
                        return ret;

                trace_clk_rate_request_done(&parent_req);

                req->best_parent_rate = parent_req.rate;
                req->rate = parent_req.rate;

                return 0;
        }

        req->rate = core->rate;
        return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
        if (!hw) {
                req->rate = 0;
                return 0;
        }

        return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

/**
 * clk_hw_round_rate() - round the given rate for a hw clk
 * @hw: the hw clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use.
 *
 * Context: prepare_lock must be held.
 *          For clk providers to call from within clk_ops such as .round_rate,
 *          .determine_rate.
 *
 * Return: returns rounded rate of hw clk if clk supports round_rate operation
 *         else returns the parent rate.
 */
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
        int ret;
        struct clk_rate_request req;

        clk_core_init_rate_req(hw->core, &req, rate);

        trace_clk_rate_request_start(&req);

        ret = clk_core_round_rate_nolock(hw->core, &req);
        if (ret)
                return 0;

        trace_clk_rate_request_done(&req);

        return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned. If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
        struct clk_rate_request req;
        int ret;

        if (!clk)
                return 0;

        clk_prepare_lock();

        if (clk->exclusive_count)
                clk_core_rate_unprotect(clk->core);

        clk_core_init_rate_req(clk->core, &req, rate);

        trace_clk_rate_request_start(&req);

        ret = clk_core_round_rate_nolock(clk->core, &req);

        trace_clk_rate_request_done(&req);

        if (clk->exclusive_count)
                clk_core_rate_protect(clk->core);

        clk_prepare_unlock();

        if (ret)
                return ret;

        return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
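/*
 * Consumers commonly use clk_round_rate() to probe what a later
 * clk_set_rate() would deliver without touching the hardware (sketch;
 * the rates and threshold are illustrative):
 *
 *        long rounded = clk_round_rate(clk, 48000000);
 *
 *        if (rounded <= 0 || rounded < 44100000)
 *                return -EINVAL;         // can't get close enough
 *        clk_set_rate(clk, rounded);
 *
 * Note that a negative return value is an error code and must be
 * checked before the value is used as a rate.
 */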
/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
                        unsigned long old_rate, unsigned long new_rate)
{
        struct clk_notifier *cn;
        struct clk_notifier_data cnd;
        int ret = NOTIFY_DONE;

        cnd.old_rate = old_rate;
        cnd.new_rate = new_rate;

        list_for_each_entry(cn, &clk_notifier_list, node) {
                if (cn->clk->core == core) {
                        cnd.clk = cn->clk;
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                                       &cnd);
                        if (ret & NOTIFY_STOP_MASK)
                                return ret;
                }
        }

        return ret;
}
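/*
 * Consumers subscribe to these messages with clk_notifier_register().
 * Sketch of a callback consuming the rates passed via struct
 * clk_notifier_data (the foo_* names and limit are illustrative):
 *
 *        static int foo_clk_notify(struct notifier_block *nb,
 *                                  unsigned long msg, void *data)
 *        {
 *                struct clk_notifier_data *cnd = data;
 *
 *                if (msg == PRE_RATE_CHANGE && cnd->new_rate > FOO_MAX_RATE)
 *                        return NOTIFY_BAD;      // veto the rate change
 *                if (msg == POST_RATE_CHANGE)
 *                        foo_rescale(cnd->new_rate);
 *                return NOTIFY_OK;
 *        }
 */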
/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
        unsigned long parent_accuracy = 0;
        struct clk_core *child;

        lockdep_assert_held(&prepare_lock);

        if (core->parent)
                parent_accuracy = core->parent->accuracy;

        if (core->ops->recalc_accuracy)
                core->accuracy = core->ops->recalc_accuracy(core->hw,
                                                            parent_accuracy);
        else
                core->accuracy = parent_accuracy;

        hlist_for_each_entry(child, &core->children, child_node)
                __clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
        if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
                __clk_recalc_accuracies(core);

        return clk_core_get_accuracy_no_lock(core);
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless the
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means an accuracy
 * recalculation will be issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
        long accuracy;

        if (!clk)
                return 0;

        clk_prepare_lock();
        accuracy = clk_core_get_accuracy_recalc(clk->core);
        clk_prepare_unlock();

        return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
                                unsigned long parent_rate)
{
        unsigned long rate = parent_rate;

        if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
                rate = core->ops->recalc_rate(core->hw, parent_rate);
                clk_pm_runtime_put(core);
        }
        return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @update_req: Whether req_rate should be updated with the new rate
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, bool update_req,
                               unsigned long msg)
{
        unsigned long old_rate;
        unsigned long parent_rate = 0;
        struct clk_core *child;

        lockdep_assert_held(&prepare_lock);

        old_rate = core->rate;

        if (core->parent)
                parent_rate = core->parent->rate;

        core->rate = clk_recalc(core, parent_rate);
        if (update_req)
                core->req_rate = core->rate;

        /*
         * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
         * & ABORT_RATE_CHANGE notifiers
         */
        if (core->notifier_count && msg)
                __clk_notify(core, msg, old_rate, core->rate);

        hlist_for_each_entry(child, &core->children, child_node)
                __clk_recalc_rates(child, update_req, msg);
}

static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
        if (core && (core->flags & CLK_GET_RATE_NOCACHE))
                __clk_recalc_rates(core, false, 0);

        return clk_core_get_rate_nolock(core);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, which means a recalc_rate will be issued. Can be called
 * regardless of whether the clock is enabled. If clk is NULL, or if an error
 * occurred, then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
        unsigned long rate;

        if (!clk)
                return 0;

        clk_prepare_lock();
        rate = clk_core_get_rate_recalc(clk->core);
        clk_prepare_unlock();

        return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
                                  struct clk_core *parent)
{
        int i;

        if (!parent)
                return -EINVAL;

        for (i = 0; i < core->num_parents; i++) {
                /* Found it first try! */
                if (core->parents[i].core == parent)
                        return i;

                /* Something else is here, so keep looking */
                if (core->parents[i].core)
                        continue;

                /* Maybe core hasn't been cached but the hw is all we know? */
                if (core->parents[i].hw) {
                        if (core->parents[i].hw == parent->hw)
                                break;

                        /* Didn't match, but we're expecting a clk_hw */
                        continue;
                }

                /* Maybe it hasn't been cached (clk_set_parent() path) */
                if (parent == clk_core_get(core, i))
                        break;

                /* Fallback to comparing globally unique names */
                if (core->parents[i].name &&
                    !strcmp(parent->name, core->parents[i].name))
                        break;
        }

        if (i == core->num_parents)
                return -EINVAL;

        core->parents[i].core = parent;
        return i;
}
/**
 * clk_hw_get_parent_index - return the index of the parent clock
 * @hw: clk_hw associated with the clk being consumed
 *
 * Fetches and returns the index of parent clock. Returns -EINVAL if the given
 * clock does not have a current parent.
 */
int clk_hw_get_parent_index(struct clk_hw *hw)
{
        struct clk_hw *parent = clk_hw_get_parent(hw);

        if (WARN_ON(parent == NULL))
                return -EINVAL;

        return clk_fetch_parent_index(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
        struct clk_core *child;

        core->orphan = is_orphan;

        hlist_for_each_entry(child, &core->children, child_node)
                clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
        bool was_orphan = core->orphan;

        hlist_del(&core->child_node);

        if (new_parent) {
                bool becomes_orphan = new_parent->orphan;

                /* avoid duplicate POST_RATE_CHANGE notifications */
                if (new_parent->new_child == core)
                        new_parent->new_child = NULL;

                hlist_add_head(&core->child_node, &new_parent->children);

                if (was_orphan != becomes_orphan)
                        clk_core_update_orphan_status(core, becomes_orphan);
        } else {
                hlist_add_head(&core->child_node, &clk_orphan_list);
                if (!was_orphan)
                        clk_core_update_orphan_status(core, true);
        }

        core->parent = new_parent;
}
2033 */ 2034 2035 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */ 2036 if (core->flags & CLK_OPS_PARENT_ENABLE) { 2037 clk_core_prepare_enable(old_parent); 2038 clk_core_prepare_enable(parent); 2039 } 2040 2041 /* migrate prepare count if > 0 */ 2042 if (core->prepare_count) { 2043 clk_core_prepare_enable(parent); 2044 clk_core_enable_lock(core); 2045 } 2046 2047 /* update the clk tree topology */ 2048 flags = clk_enable_lock(); 2049 clk_reparent(core, parent); 2050 clk_enable_unlock(flags); 2051 2052 return old_parent; 2053 } 2054 2055 static void __clk_set_parent_after(struct clk_core *core, 2056 struct clk_core *parent, 2057 struct clk_core *old_parent) 2058 { 2059 /* 2060 * Finish the migration of prepare state and undo the changes done 2061 * for preventing a race with clk_enable(). 2062 */ 2063 if (core->prepare_count) { 2064 clk_core_disable_lock(core); 2065 clk_core_disable_unprepare(old_parent); 2066 } 2067 2068 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */ 2069 if (core->flags & CLK_OPS_PARENT_ENABLE) { 2070 clk_core_disable_unprepare(parent); 2071 clk_core_disable_unprepare(old_parent); 2072 } 2073 } 2074 2075 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, 2076 u8 p_index) 2077 { 2078 unsigned long flags; 2079 int ret = 0; 2080 struct clk_core *old_parent; 2081 2082 old_parent = __clk_set_parent_before(core, parent); 2083 2084 trace_clk_set_parent(core, parent); 2085 2086 /* change clock input source */ 2087 if (parent && core->ops->set_parent) 2088 ret = core->ops->set_parent(core->hw, p_index); 2089 2090 trace_clk_set_parent_complete(core, parent); 2091 2092 if (ret) { 2093 flags = clk_enable_lock(); 2094 clk_reparent(core, old_parent); 2095 clk_enable_unlock(flags); 2096 2097 __clk_set_parent_after(core, old_parent, parent); 2098 2099 return ret; 2100 } 2101 2102 __clk_set_parent_after(core, parent, old_parent); 2103 2104 return 0; 2105 } 2106 2107 /** 2108 * __clk_speculate_rates 2109 * @core: first clk in the subtree 2110 * @parent_rate: the "future" rate of clk's parent 2111 * 2112 * Walks the subtree of clks starting with clk, speculating rates as it 2113 * goes and firing off PRE_RATE_CHANGE notifications as necessary. 2114 * 2115 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending 2116 * pre-rate change notifications and returns early if no clks in the 2117 * subtree have subscribed to the notifications. Note that if a clk does not 2118 * implement the .recalc_rate callback then it is assumed that the clock will 2119 * take on the rate of its parent. 
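* * Consumers subscribe to these notifications with clk_notifier_register(). * A minimal sketch of a notifier callback (hypothetical driver code, not * part of this file) that vetoes any rate above some limit: * * static int foo_clk_cb(struct notifier_block *nb, unsigned long event, * void *data) * { * struct clk_notifier_data *nd = data; * * if (event == PRE_RATE_CHANGE && nd->new_rate > 100000000) * return NOTIFY_BAD; * return NOTIFY_OK; * }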
2120 */ 2121 static int __clk_speculate_rates(struct clk_core *core, 2122 unsigned long parent_rate) 2123 { 2124 struct clk_core *child; 2125 unsigned long new_rate; 2126 int ret = NOTIFY_DONE; 2127 2128 lockdep_assert_held(&prepare_lock); 2129 2130 new_rate = clk_recalc(core, parent_rate); 2131 2132 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ 2133 if (core->notifier_count) 2134 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); 2135 2136 if (ret & NOTIFY_STOP_MASK) { 2137 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", 2138 __func__, core->name, ret); 2139 goto out; 2140 } 2141 2142 hlist_for_each_entry(child, &core->children, child_node) { 2143 ret = __clk_speculate_rates(child, new_rate); 2144 if (ret & NOTIFY_STOP_MASK) 2145 break; 2146 } 2147 2148 out: 2149 return ret; 2150 } 2151 2152 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, 2153 struct clk_core *new_parent, u8 p_index) 2154 { 2155 struct clk_core *child; 2156 2157 core->new_rate = new_rate; 2158 core->new_parent = new_parent; 2159 core->new_parent_index = p_index; 2160 /* include clk in new parent's PRE_RATE_CHANGE notifications */ 2161 core->new_child = NULL; 2162 if (new_parent && new_parent != core->parent) 2163 new_parent->new_child = core; 2164 2165 hlist_for_each_entry(child, &core->children, child_node) { 2166 child->new_rate = clk_recalc(child, new_rate); 2167 clk_calc_subtree(child, child->new_rate, NULL, 0); 2168 } 2169 } 2170 2171 /* 2172 * calculate the new rates returning the topmost clock that has to be 2173 * changed. 2174 */ 2175 static struct clk_core *clk_calc_new_rates(struct clk_core *core, 2176 unsigned long rate) 2177 { 2178 struct clk_core *top = core; 2179 struct clk_core *old_parent, *parent; 2180 unsigned long best_parent_rate = 0; 2181 unsigned long new_rate; 2182 unsigned long min_rate; 2183 unsigned long max_rate; 2184 int p_index = 0; 2185 long ret; 2186 2187 /* sanity */ 2188 if (IS_ERR_OR_NULL(core)) 2189 return NULL; 2190 2191 /* save parent rate, if it exists */ 2192 parent = old_parent = core->parent; 2193 if (parent) 2194 best_parent_rate = parent->rate; 2195 2196 clk_core_get_boundaries(core, &min_rate, &max_rate); 2197 2198 /* find the closest rate and parent clk/rate */ 2199 if (clk_core_can_round(core)) { 2200 struct clk_rate_request req; 2201 2202 clk_core_init_rate_req(core, &req, rate); 2203 2204 trace_clk_rate_request_start(&req); 2205 2206 ret = clk_core_determine_round_nolock(core, &req); 2207 if (ret < 0) 2208 return NULL; 2209 2210 trace_clk_rate_request_done(&req); 2211 2212 best_parent_rate = req.best_parent_rate; 2213 new_rate = req.rate; 2214 parent = req.best_parent_hw ? 
req.best_parent_hw->core : NULL; 2215 2216 if (new_rate < min_rate || new_rate > max_rate) 2217 return NULL; 2218 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { 2219 /* pass-through clock without adjustable parent */ 2220 core->new_rate = core->rate; 2221 return NULL; 2222 } else { 2223 /* pass-through clock with adjustable parent */ 2224 top = clk_calc_new_rates(parent, rate); 2225 new_rate = parent->new_rate; 2226 goto out; 2227 } 2228 2229 /* some clocks must be gated to change parent */ 2230 if (parent != old_parent && 2231 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { 2232 pr_debug("%s: %s not gated but wants to reparent\n", 2233 __func__, core->name); 2234 return NULL; 2235 } 2236 2237 /* try finding the new parent index */ 2238 if (parent && core->num_parents > 1) { 2239 p_index = clk_fetch_parent_index(core, parent); 2240 if (p_index < 0) { 2241 pr_debug("%s: clk %s can not be parent of clk %s\n", 2242 __func__, parent->name, core->name); 2243 return NULL; 2244 } 2245 } 2246 2247 if ((core->flags & CLK_SET_RATE_PARENT) && parent && 2248 best_parent_rate != parent->rate) 2249 top = clk_calc_new_rates(parent, best_parent_rate); 2250 2251 out: 2252 clk_calc_subtree(core, new_rate, parent, p_index); 2253 2254 return top; 2255 } 2256 2257 /* 2258 * Notify about rate changes in a subtree. Always walk down the whole tree 2259 * so that in case of an error we can walk down the whole tree again and 2260 * abort the change. 2261 */ 2262 static struct clk_core *clk_propagate_rate_change(struct clk_core *core, 2263 unsigned long event) 2264 { 2265 struct clk_core *child, *tmp_clk, *fail_clk = NULL; 2266 int ret = NOTIFY_DONE; 2267 2268 if (core->rate == core->new_rate) 2269 return NULL; 2270 2271 if (core->notifier_count) { 2272 ret = __clk_notify(core, event, core->rate, core->new_rate); 2273 if (ret & NOTIFY_STOP_MASK) 2274 fail_clk = core; 2275 } 2276 2277 hlist_for_each_entry(child, &core->children, child_node) { 2278 /* Skip children who will be reparented to another clock */ 2279 if (child->new_parent && child->new_parent != core) 2280 continue; 2281 tmp_clk = clk_propagate_rate_change(child, event); 2282 if (tmp_clk) 2283 fail_clk = tmp_clk; 2284 } 2285 2286 /* handle the new child who might not be in core->children yet */ 2287 if (core->new_child) { 2288 tmp_clk = clk_propagate_rate_change(core->new_child, event); 2289 if (tmp_clk) 2290 fail_clk = tmp_clk; 2291 } 2292 2293 return fail_clk; 2294 } 2295 2296 /* 2297 * walk down a subtree and set the new rates notifying the rate 2298 * change on the way 2299 */ 2300 static void clk_change_rate(struct clk_core *core) 2301 { 2302 struct clk_core *child; 2303 struct hlist_node *tmp; 2304 unsigned long old_rate; 2305 unsigned long best_parent_rate = 0; 2306 bool skip_set_rate = false; 2307 struct clk_core *old_parent; 2308 struct clk_core *parent = NULL; 2309 2310 old_rate = core->rate; 2311 2312 if (core->new_parent) { 2313 parent = core->new_parent; 2314 best_parent_rate = core->new_parent->rate; 2315 } else if (core->parent) { 2316 parent = core->parent; 2317 best_parent_rate = core->parent->rate; 2318 } 2319 2320 if (clk_pm_runtime_get(core)) 2321 return; 2322 2323 if (core->flags & CLK_SET_RATE_UNGATE) { 2324 clk_core_prepare(core); 2325 clk_core_enable_lock(core); 2326 } 2327 2328 if (core->new_parent && core->new_parent != core->parent) { 2329 old_parent = __clk_set_parent_before(core, core->new_parent); 2330 trace_clk_set_parent(core, core->new_parent); 2331 2332 if (core->ops->set_rate_and_parent) { 
2333 skip_set_rate = true; 2334 core->ops->set_rate_and_parent(core->hw, core->new_rate, 2335 best_parent_rate, 2336 core->new_parent_index); 2337 } else if (core->ops->set_parent) { 2338 core->ops->set_parent(core->hw, core->new_parent_index); 2339 } 2340 2341 trace_clk_set_parent_complete(core, core->new_parent); 2342 __clk_set_parent_after(core, core->new_parent, old_parent); 2343 } 2344 2345 if (core->flags & CLK_OPS_PARENT_ENABLE) 2346 clk_core_prepare_enable(parent); 2347 2348 trace_clk_set_rate(core, core->new_rate); 2349 2350 if (!skip_set_rate && core->ops->set_rate) 2351 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); 2352 2353 trace_clk_set_rate_complete(core, core->new_rate); 2354 2355 core->rate = clk_recalc(core, best_parent_rate); 2356 2357 if (core->flags & CLK_SET_RATE_UNGATE) { 2358 clk_core_disable_lock(core); 2359 clk_core_unprepare(core); 2360 } 2361 2362 if (core->flags & CLK_OPS_PARENT_ENABLE) 2363 clk_core_disable_unprepare(parent); 2364 2365 if (core->notifier_count && old_rate != core->rate) 2366 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); 2367 2368 if (core->flags & CLK_RECALC_NEW_RATES) 2369 (void)clk_calc_new_rates(core, core->new_rate); 2370 2371 /* 2372 * Use safe iteration, as change_rate can actually swap parents 2373 * for certain clock types. 2374 */ 2375 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { 2376 /* Skip children who will be reparented to another clock */ 2377 if (child->new_parent && child->new_parent != core) 2378 continue; 2379 clk_change_rate(child); 2380 } 2381 2382 /* handle the new child who might not be in core->children yet */ 2383 if (core->new_child) 2384 clk_change_rate(core->new_child); 2385 2386 clk_pm_runtime_put(core); 2387 } 2388 2389 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, 2390 unsigned long req_rate) 2391 { 2392 int ret, cnt; 2393 struct clk_rate_request req; 2394 2395 lockdep_assert_held(&prepare_lock); 2396 2397 if (!core) 2398 return 0; 2399 2400 /* simulate what the rate would be if it could be freely set */ 2401 cnt = clk_core_rate_nuke_protect(core); 2402 if (cnt < 0) 2403 return cnt; 2404 2405 clk_core_init_rate_req(core, &req, req_rate); 2406 2407 trace_clk_rate_request_start(&req); 2408 2409 ret = clk_core_round_rate_nolock(core, &req); 2410 2411 trace_clk_rate_request_done(&req); 2412 2413 /* restore the protection */ 2414 clk_core_rate_restore_protect(core, cnt); 2415 2416 return ret ? 
0 : req.rate; 2417 } 2418 2419 static int clk_core_set_rate_nolock(struct clk_core *core, 2420 unsigned long req_rate) 2421 { 2422 struct clk_core *top, *fail_clk; 2423 unsigned long rate; 2424 int ret; 2425 2426 if (!core) 2427 return 0; 2428 2429 rate = clk_core_req_round_rate_nolock(core, req_rate); 2430 2431 /* bail early if nothing to do */ 2432 if (rate == clk_core_get_rate_nolock(core)) 2433 return 0; 2434 2435 /* fail on a direct rate set of a protected provider */ 2436 if (clk_core_rate_is_protected(core)) 2437 return -EBUSY; 2438 2439 /* calculate new rates and get the topmost changed clock */ 2440 top = clk_calc_new_rates(core, req_rate); 2441 if (!top) 2442 return -EINVAL; 2443 2444 ret = clk_pm_runtime_get(core); 2445 if (ret) 2446 return ret; 2447 2448 /* notify that we are about to change rates */ 2449 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); 2450 if (fail_clk) { 2451 pr_debug("%s: failed to set %s rate\n", __func__, 2452 fail_clk->name); 2453 clk_propagate_rate_change(top, ABORT_RATE_CHANGE); 2454 ret = -EBUSY; 2455 goto err; 2456 } 2457 2458 /* change the rates */ 2459 clk_change_rate(top); 2460 2461 core->req_rate = req_rate; 2462 err: 2463 clk_pm_runtime_put(core); 2464 2465 return ret; 2466 } 2467 2468 /** 2469 * clk_set_rate - specify a new rate for clk 2470 * @clk: the clk whose rate is being changed 2471 * @rate: the new rate for clk 2472 * 2473 * In the simplest case clk_set_rate will only adjust the rate of clk. 2474 * 2475 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to 2476 * propagate up to clk's parent; whether or not this happens depends on the 2477 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged 2478 * after calling .round_rate then upstream parent propagation is ignored. If 2479 * *parent_rate comes back with a new rate for clk's parent then we propagate 2480 * up to clk's parent and set its rate. Upward propagation will continue 2481 * until either a clk does not support the CLK_SET_RATE_PARENT flag or 2482 * .round_rate stops requesting changes to clk's parent_rate. 2483 * 2484 * Rate changes are accomplished via tree traversal that also recalculates the 2485 * rates for the clocks and fires off POST_RATE_CHANGE notifiers. 2486 * 2487 * Returns 0 on success, a negative errno otherwise. 2488 */ 2489 int clk_set_rate(struct clk *clk, unsigned long rate) 2490 { 2491 int ret; 2492 2493 if (!clk) 2494 return 0; 2495 2496 /* prevent racing with updates to the clock topology */ 2497 clk_prepare_lock(); 2498 2499 if (clk->exclusive_count) 2500 clk_core_rate_unprotect(clk->core); 2501 2502 ret = clk_core_set_rate_nolock(clk->core, rate); 2503 2504 if (clk->exclusive_count) 2505 clk_core_rate_protect(clk->core); 2506 2507 clk_prepare_unlock(); 2508 2509 return ret; 2510 } 2511 EXPORT_SYMBOL_GPL(clk_set_rate); 2512 2513 /** 2514 * clk_set_rate_exclusive - specify a new rate and get exclusive control 2515 * @clk: the clk whose rate is being changed 2516 * @rate: the new rate for clk 2517 * 2518 * This is a combination of clk_set_rate() and clk_rate_exclusive_get() 2519 * within a critical section. 2520 * 2521 * This can be used initially to ensure that at least one consumer is 2522 * satisfied when several consumers are competing for exclusivity over the 2523 * same clock provider. 2524 * 2525 * The exclusivity is not applied if setting the rate failed. 2526 * 2527 * Calls to clk_rate_exclusive_get() should be balanced with calls to 2528 * clk_rate_exclusive_put().
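* * A minimal consumer sketch (illustrative rate value, hypothetical * surrounding driver code): * * ret = clk_set_rate_exclusive(clk, 19200000); * if (ret) * return ret; * ... no other consumer can change the rate here ... * clk_rate_exclusive_put(clk);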
2529 * 2530 * Returns 0 on success, a negative errno otherwise. 2531 */ 2532 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) 2533 { 2534 int ret; 2535 2536 if (!clk) 2537 return 0; 2538 2539 /* prevent racing with updates to the clock topology */ 2540 clk_prepare_lock(); 2541 2542 /* 2543 * The temporary protection removal is deliberately omitted here: this 2544 * function is meant to be used instead of clk_rate_protect, so the 2545 * clock provider is protected before the consumer code path runs 2546 */ 2547 2548 ret = clk_core_set_rate_nolock(clk->core, rate); 2549 if (!ret) { 2550 clk_core_rate_protect(clk->core); 2551 clk->exclusive_count++; 2552 } 2553 2554 clk_prepare_unlock(); 2555 2556 return ret; 2557 } 2558 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); 2559 2560 static int clk_set_rate_range_nolock(struct clk *clk, 2561 unsigned long min, 2562 unsigned long max) 2563 { 2564 int ret = 0; 2565 unsigned long old_min, old_max, rate; 2566 2567 lockdep_assert_held(&prepare_lock); 2568 2569 if (!clk) 2570 return 0; 2571 2572 trace_clk_set_rate_range(clk->core, min, max); 2573 2574 if (min > max) { 2575 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n", 2576 __func__, clk->core->name, clk->dev_id, clk->con_id, 2577 min, max); 2578 return -EINVAL; 2579 } 2580 2581 if (clk->exclusive_count) 2582 clk_core_rate_unprotect(clk->core); 2583 2584 /* Save the current values in case we need to roll back the change */ 2585 old_min = clk->min_rate; 2586 old_max = clk->max_rate; 2587 clk->min_rate = min; 2588 clk->max_rate = max; 2589 2590 if (!clk_core_check_boundaries(clk->core, min, max)) { 2591 ret = -EINVAL; 2592 goto out; 2593 } 2594 2595 rate = clk->core->req_rate; 2596 if (clk->core->flags & CLK_GET_RATE_NOCACHE) 2597 rate = clk_core_get_rate_recalc(clk->core); 2598 2599 /* 2600 * Since the boundaries have been changed, let's give the provider the 2601 * opportunity to adjust the clock rate based on 2602 * the new boundaries. 2603 * 2604 * We also need to handle the case where the clock is currently 2605 * outside of the boundaries. Clamping the last requested rate 2606 * to the current minimum and maximum will also handle this. 2607 * 2608 * FIXME: 2609 * There is a catch. It may fail for the usual reason (clock 2610 * broken, clock protected, etc) but also because: 2611 * - round_rate() was not favorable and fell on the wrong 2612 * side of the boundary 2613 * - the determine_rate() callback does not really check for 2614 * this corner case when determining the rate 2615 */ 2616 rate = clamp(rate, min, max); 2617 ret = clk_core_set_rate_nolock(clk->core, rate); 2618 if (ret) { 2619 /* roll back the changes */ 2620 clk->min_rate = old_min; 2621 clk->max_rate = old_max; 2622 } 2623 2624 out: 2625 if (clk->exclusive_count) 2626 clk_core_rate_protect(clk->core); 2627 2628 return ret; 2629 } 2630 2631 /** 2632 * clk_set_rate_range - set a rate range for a clock source 2633 * @clk: clock source 2634 * @min: desired minimum clock rate in Hz, inclusive 2635 * @max: desired maximum clock rate in Hz, inclusive 2636 * 2637 * Return: 0 for success or negative errno on failure.
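* * For example, a consumer that can tolerate any rate between 100 MHz and * 200 MHz (illustrative values) could request: * * ret = clk_set_rate_range(clk, 100000000, 200000000);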
2638 */ 2639 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) 2640 { 2641 int ret; 2642 2643 if (!clk) 2644 return 0; 2645 2646 clk_prepare_lock(); 2647 2648 ret = clk_set_rate_range_nolock(clk, min, max); 2649 2650 clk_prepare_unlock(); 2651 2652 return ret; 2653 } 2654 EXPORT_SYMBOL_GPL(clk_set_rate_range); 2655 2656 /** 2657 * clk_set_min_rate - set a minimum clock rate for a clock source 2658 * @clk: clock source 2659 * @rate: desired minimum clock rate in Hz, inclusive 2660 * 2661 * Returns success (0) or negative errno. 2662 */ 2663 int clk_set_min_rate(struct clk *clk, unsigned long rate) 2664 { 2665 if (!clk) 2666 return 0; 2667 2668 trace_clk_set_min_rate(clk->core, rate); 2669 2670 return clk_set_rate_range(clk, rate, clk->max_rate); 2671 } 2672 EXPORT_SYMBOL_GPL(clk_set_min_rate); 2673 2674 /** 2675 * clk_set_max_rate - set a maximum clock rate for a clock source 2676 * @clk: clock source 2677 * @rate: desired maximum clock rate in Hz, inclusive 2678 * 2679 * Returns success (0) or negative errno. 2680 */ 2681 int clk_set_max_rate(struct clk *clk, unsigned long rate) 2682 { 2683 if (!clk) 2684 return 0; 2685 2686 trace_clk_set_max_rate(clk->core, rate); 2687 2688 return clk_set_rate_range(clk, clk->min_rate, rate); 2689 } 2690 EXPORT_SYMBOL_GPL(clk_set_max_rate); 2691 2692 /** 2693 * clk_get_parent - return the parent of a clk 2694 * @clk: the clk whose parent gets returned 2695 * 2696 * Simply returns clk->parent. Returns NULL if clk is NULL. 2697 */ 2698 struct clk *clk_get_parent(struct clk *clk) 2699 { 2700 struct clk *parent; 2701 2702 if (!clk) 2703 return NULL; 2704 2705 clk_prepare_lock(); 2706 /* TODO: Create a per-user clk and change callers to call clk_put */ 2707 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; 2708 clk_prepare_unlock(); 2709 2710 return parent; 2711 } 2712 EXPORT_SYMBOL_GPL(clk_get_parent); 2713 2714 static struct clk_core *__clk_init_parent(struct clk_core *core) 2715 { 2716 u8 index = 0; 2717 2718 if (core->num_parents > 1 && core->ops->get_parent) 2719 index = core->ops->get_parent(core->hw); 2720 2721 return clk_core_get_parent_by_index(core, index); 2722 } 2723 2724 static void clk_core_reparent(struct clk_core *core, 2725 struct clk_core *new_parent) 2726 { 2727 clk_reparent(core, new_parent); 2728 __clk_recalc_accuracies(core); 2729 __clk_recalc_rates(core, true, POST_RATE_CHANGE); 2730 } 2731 2732 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) 2733 { 2734 if (!hw) 2735 return; 2736 2737 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); 2738 } 2739 2740 /** 2741 * clk_has_parent - check if a clock is a possible parent for another 2742 * @clk: clock source 2743 * @parent: parent clock source 2744 * 2745 * This function can be used in drivers that need to check that a clock can be 2746 * the parent of another without actually changing the parent. 2747 * 2748 * Returns true if @parent is a possible parent for @clk, false otherwise. 2749 */ 2750 bool clk_has_parent(const struct clk *clk, const struct clk *parent) 2751 { 2752 /* NULL clocks should be nops, so return success if either is NULL. 
*/ 2753 if (!clk || !parent) 2754 return true; 2755 2756 return clk_core_has_parent(clk->core, parent->core); 2757 } 2758 EXPORT_SYMBOL_GPL(clk_has_parent); 2759 2760 static int clk_core_set_parent_nolock(struct clk_core *core, 2761 struct clk_core *parent) 2762 { 2763 int ret = 0; 2764 int p_index = 0; 2765 unsigned long p_rate = 0; 2766 2767 lockdep_assert_held(&prepare_lock); 2768 2769 if (!core) 2770 return 0; 2771 2772 if (core->parent == parent) 2773 return 0; 2774 2775 /* verify ops for multi-parent clks */ 2776 if (core->num_parents > 1 && !core->ops->set_parent) 2777 return -EPERM; 2778 2779 /* check that we are allowed to re-parent if the clock is in use */ 2780 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) 2781 return -EBUSY; 2782 2783 if (clk_core_rate_is_protected(core)) 2784 return -EBUSY; 2785 2786 /* try finding the new parent index */ 2787 if (parent) { 2788 p_index = clk_fetch_parent_index(core, parent); 2789 if (p_index < 0) { 2790 pr_debug("%s: clk %s can not be parent of clk %s\n", 2791 __func__, parent->name, core->name); 2792 return p_index; 2793 } 2794 p_rate = parent->rate; 2795 } 2796 2797 ret = clk_pm_runtime_get(core); 2798 if (ret) 2799 return ret; 2800 2801 /* propagate PRE_RATE_CHANGE notifications */ 2802 ret = __clk_speculate_rates(core, p_rate); 2803 2804 /* abort if a driver objects */ 2805 if (ret & NOTIFY_STOP_MASK) 2806 goto runtime_put; 2807 2808 /* do the re-parent */ 2809 ret = __clk_set_parent(core, parent, p_index); 2810 2811 /* propagate rate and accuracy recalculation accordingly */ 2812 if (ret) { 2813 __clk_recalc_rates(core, true, ABORT_RATE_CHANGE); 2814 } else { 2815 __clk_recalc_rates(core, true, POST_RATE_CHANGE); 2816 __clk_recalc_accuracies(core); 2817 } 2818 2819 runtime_put: 2820 clk_pm_runtime_put(core); 2821 2822 return ret; 2823 } 2824 2825 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent) 2826 { 2827 return clk_core_set_parent_nolock(hw->core, parent->core); 2828 } 2829 EXPORT_SYMBOL_GPL(clk_hw_set_parent); 2830 2831 /** 2832 * clk_set_parent - switch the parent of a mux clk 2833 * @clk: the mux clk whose input we are switching 2834 * @parent: the new input to clk 2835 * 2836 * Re-parent clk to use parent as its new input source. If clk is in 2837 * prepared state, the clk will get enabled for the duration of this call. If 2838 * that's not acceptable for a specific clk (e.g. the consumer can't handle 2839 * that, the reparenting is glitchy in hardware, etc), use the 2840 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. 2841 * 2842 * After successfully changing clk's parent, clk_set_parent will update the 2843 * clk topology, sysfs topology and propagate rate recalculation via 2844 * __clk_recalc_rates. 2845 * 2846 * Returns 0 on success, a negative errno otherwise. 2847 */ 2848 int clk_set_parent(struct clk *clk, struct clk *parent) 2849 { 2850 int ret; 2851 2852 if (!clk) 2853 return 0; 2854 2855 clk_prepare_lock(); 2856 2857 if (clk->exclusive_count) 2858 clk_core_rate_unprotect(clk->core); 2859 2860 ret = clk_core_set_parent_nolock(clk->core, 2861 parent ?
parent->core : NULL); 2862 2863 if (clk->exclusive_count) 2864 clk_core_rate_protect(clk->core); 2865 2866 clk_prepare_unlock(); 2867 2868 return ret; 2869 } 2870 EXPORT_SYMBOL_GPL(clk_set_parent); 2871 2872 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) 2873 { 2874 int ret = -EINVAL; 2875 2876 lockdep_assert_held(&prepare_lock); 2877 2878 if (!core) 2879 return 0; 2880 2881 if (clk_core_rate_is_protected(core)) 2882 return -EBUSY; 2883 2884 trace_clk_set_phase(core, degrees); 2885 2886 if (core->ops->set_phase) { 2887 ret = core->ops->set_phase(core->hw, degrees); 2888 if (!ret) 2889 core->phase = degrees; 2890 } 2891 2892 trace_clk_set_phase_complete(core, degrees); 2893 2894 return ret; 2895 } 2896 2897 /** 2898 * clk_set_phase - adjust the phase shift of a clock signal 2899 * @clk: clock signal source 2900 * @degrees: number of degrees the signal is shifted 2901 * 2902 * Shifts the phase of a clock signal by the specified 2903 * degrees. Returns 0 on success, a negative errno otherwise. 2904 * 2905 * This function makes no distinction about the input or reference 2906 * signal that we adjust the clock signal phase against. For example, with 2907 * phase-locked loop clock signal generators we may shift phase with 2908 * respect to the feedback clock signal input, but for other cases the 2909 * clock phase may be shifted with respect to some other, unspecified 2910 * signal. 2911 * 2912 * Additionally the concept of phase shift does not propagate through 2913 * the clock tree hierarchy, which sets it apart from clock rates and 2914 * clock accuracy. A parent clock phase attribute does not have an 2915 * impact on the phase attribute of a child clock. 2916 */ 2917 int clk_set_phase(struct clk *clk, int degrees) 2918 { 2919 int ret; 2920 2921 if (!clk) 2922 return 0; 2923 2924 /* sanity check degrees */ 2925 degrees %= 360; 2926 if (degrees < 0) 2927 degrees += 360; 2928 2929 clk_prepare_lock(); 2930 2931 if (clk->exclusive_count) 2932 clk_core_rate_unprotect(clk->core); 2933 2934 ret = clk_core_set_phase_nolock(clk->core, degrees); 2935 2936 if (clk->exclusive_count) 2937 clk_core_rate_protect(clk->core); 2938 2939 clk_prepare_unlock(); 2940 2941 return ret; 2942 } 2943 EXPORT_SYMBOL_GPL(clk_set_phase); 2944 2945 static int clk_core_get_phase(struct clk_core *core) 2946 { 2947 int ret; 2948 2949 lockdep_assert_held(&prepare_lock); 2950 if (!core->ops->get_phase) 2951 return 0; 2952 2953 /* Always try to update cached phase if possible */ 2954 ret = core->ops->get_phase(core->hw); 2955 if (ret >= 0) 2956 core->phase = ret; 2957 2958 return ret; 2959 } 2960 2961 /** 2962 * clk_get_phase - return the phase shift of a clock signal 2963 * @clk: clock signal source 2964 * 2965 * Returns the phase shift of a clock node in degrees, otherwise returns 2966 * a negative errno.
2967 */ 2968 int clk_get_phase(struct clk *clk) 2969 { 2970 int ret; 2971 2972 if (!clk) 2973 return 0; 2974 2975 clk_prepare_lock(); 2976 ret = clk_core_get_phase(clk->core); 2977 clk_prepare_unlock(); 2978 2979 return ret; 2980 } 2981 EXPORT_SYMBOL_GPL(clk_get_phase); 2982 2983 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) 2984 { 2985 /* Assume a default value of 50% */ 2986 core->duty.num = 1; 2987 core->duty.den = 2; 2988 } 2989 2990 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core); 2991 2992 static int clk_core_update_duty_cycle_nolock(struct clk_core *core) 2993 { 2994 struct clk_duty *duty = &core->duty; 2995 int ret = 0; 2996 2997 if (!core->ops->get_duty_cycle) 2998 return clk_core_update_duty_cycle_parent_nolock(core); 2999 3000 ret = core->ops->get_duty_cycle(core->hw, duty); 3001 if (ret) 3002 goto reset; 3003 3004 /* Don't trust the clock provider too much */ 3005 if (duty->den == 0 || duty->num > duty->den) { 3006 ret = -EINVAL; 3007 goto reset; 3008 } 3009 3010 return 0; 3011 3012 reset: 3013 clk_core_reset_duty_cycle_nolock(core); 3014 return ret; 3015 } 3016 3017 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) 3018 { 3019 int ret = 0; 3020 3021 if (core->parent && 3022 core->flags & CLK_DUTY_CYCLE_PARENT) { 3023 ret = clk_core_update_duty_cycle_nolock(core->parent); 3024 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 3025 } else { 3026 clk_core_reset_duty_cycle_nolock(core); 3027 } 3028 3029 return ret; 3030 } 3031 3032 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 3033 struct clk_duty *duty); 3034 3035 static int clk_core_set_duty_cycle_nolock(struct clk_core *core, 3036 struct clk_duty *duty) 3037 { 3038 int ret; 3039 3040 lockdep_assert_held(&prepare_lock); 3041 3042 if (clk_core_rate_is_protected(core)) 3043 return -EBUSY; 3044 3045 trace_clk_set_duty_cycle(core, duty); 3046 3047 if (!core->ops->set_duty_cycle) 3048 return clk_core_set_duty_cycle_parent_nolock(core, duty); 3049 3050 ret = core->ops->set_duty_cycle(core->hw, duty); 3051 if (!ret) 3052 memcpy(&core->duty, duty, sizeof(*duty)); 3053 3054 trace_clk_set_duty_cycle_complete(core, duty); 3055 3056 return ret; 3057 } 3058 3059 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 3060 struct clk_duty *duty) 3061 { 3062 int ret = 0; 3063 3064 if (core->parent && 3065 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { 3066 ret = clk_core_set_duty_cycle_nolock(core->parent, duty); 3067 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 3068 } 3069 3070 return ret; 3071 } 3072 3073 /** 3074 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal 3075 * @clk: clock signal source 3076 * @num: numerator of the duty cycle ratio to be applied 3077 * @den: denominator of the duty cycle ratio to be applied 3078 * 3079 * Apply the duty cycle ratio if the ratio is valid and the clock can 3080 * perform this operation 3081 * 3082 * Returns (0) on success, a negative errno otherwise. 
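* * For example, a consumer requesting a 25% duty cycle (illustrative) would * call: * * ret = clk_set_duty_cycle(clk, 1, 4);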
3083 */ 3084 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den) 3085 { 3086 int ret; 3087 struct clk_duty duty; 3088 3089 if (!clk) 3090 return 0; 3091 3092 /* sanity check the ratio */ 3093 if (den == 0 || num > den) 3094 return -EINVAL; 3095 3096 duty.num = num; 3097 duty.den = den; 3098 3099 clk_prepare_lock(); 3100 3101 if (clk->exclusive_count) 3102 clk_core_rate_unprotect(clk->core); 3103 3104 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); 3105 3106 if (clk->exclusive_count) 3107 clk_core_rate_protect(clk->core); 3108 3109 clk_prepare_unlock(); 3110 3111 return ret; 3112 } 3113 EXPORT_SYMBOL_GPL(clk_set_duty_cycle); 3114 3115 static int clk_core_get_scaled_duty_cycle(struct clk_core *core, 3116 unsigned int scale) 3117 { 3118 struct clk_duty *duty = &core->duty; 3119 int ret; 3120 3121 clk_prepare_lock(); 3122 3123 ret = clk_core_update_duty_cycle_nolock(core); 3124 if (!ret) 3125 ret = mult_frac(scale, duty->num, duty->den); 3126 3127 clk_prepare_unlock(); 3128 3129 return ret; 3130 } 3131 3132 /** 3133 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal 3134 * @clk: clock signal source 3135 * @scale: scaling factor to be applied to represent the ratio as an integer 3136 * 3137 * Returns the duty cycle ratio of a clock node multiplied by the provided 3138 * scaling factor, or negative errno on error. 3139 */ 3140 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale) 3141 { 3142 if (!clk) 3143 return 0; 3144 3145 return clk_core_get_scaled_duty_cycle(clk->core, scale); 3146 } 3147 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle); 3148 3149 /** 3150 * clk_is_match - check if two clk's point to the same hardware clock 3151 * @p: clk compared against q 3152 * @q: clk compared against p 3153 * 3154 * Returns true if the two struct clk pointers both point to the same hardware 3155 * clock node. Put differently, returns true if struct clk *p and struct clk *q 3156 * share the same struct clk_core object. 3157 * 3158 * Returns false otherwise. Note that two NULL clks are treated as matching. 3159 */ 3160 bool clk_is_match(const struct clk *p, const struct clk *q) 3161 { 3162 /* trivial case: identical struct clk's or both NULL */ 3163 if (p == q) 3164 return true; 3165 3166 /* true if clk->core pointers match. Avoid dereferencing garbage */ 3167 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) 3168 if (p->core == q->core) 3169 return true; 3170 3171 return false; 3172 } 3173 EXPORT_SYMBOL_GPL(clk_is_match); 3174 3175 /*** debugfs support ***/ 3176 3177 #ifdef CONFIG_DEBUG_FS 3178 #include <linux/debugfs.h> 3179 3180 static struct dentry *rootdir; 3181 static int inited = 0; 3182 static DEFINE_MUTEX(clk_debug_lock); 3183 static HLIST_HEAD(clk_debug_list); 3184 3185 static struct hlist_head *orphan_list[] = { 3186 &clk_orphan_list, 3187 NULL, 3188 }; 3189 3190 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, 3191 int level) 3192 { 3193 int phase; 3194 3195 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ", 3196 level * 3 + 1, "", 3197 30 - level * 3, c->name, 3198 c->enable_count, c->prepare_count, c->protect_count, 3199 clk_core_get_rate_recalc(c), 3200 clk_core_get_accuracy_recalc(c)); 3201 3202 phase = clk_core_get_phase(c); 3203 if (phase >= 0) 3204 seq_printf(s, "%5d", phase); 3205 else 3206 seq_puts(s, "-----"); 3207 3208 seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000)); 3209 3210 if (c->ops->is_enabled) 3211 seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 
'Y' : 'N'); 3212 else if (!c->ops->enable) 3213 seq_printf(s, " %9c\n", 'Y'); 3214 else 3215 seq_printf(s, " %9c\n", '?'); 3216 } 3217 3218 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, 3219 int level) 3220 { 3221 struct clk_core *child; 3222 3223 clk_pm_runtime_get(c); 3224 clk_summary_show_one(s, c, level); 3225 clk_pm_runtime_put(c); 3226 3227 hlist_for_each_entry(child, &c->children, child_node) 3228 clk_summary_show_subtree(s, child, level + 1); 3229 } 3230 3231 static int clk_summary_show(struct seq_file *s, void *data) 3232 { 3233 struct clk_core *c; 3234 struct hlist_head **lists = s->private; 3235 3236 seq_puts(s, " enable prepare protect duty hardware\n"); 3237 seq_puts(s, " clock count count count rate accuracy phase cycle enable\n"); 3238 seq_puts(s, "-------------------------------------------------------------------------------------------------------\n"); 3239 3240 clk_prepare_lock(); 3241 3242 for (; *lists; lists++) 3243 hlist_for_each_entry(c, *lists, child_node) 3244 clk_summary_show_subtree(s, c, 0); 3245 3246 clk_prepare_unlock(); 3247 3248 return 0; 3249 } 3250 DEFINE_SHOW_ATTRIBUTE(clk_summary); 3251 3252 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) 3253 { 3254 int phase; 3255 unsigned long min_rate, max_rate; 3256 3257 clk_core_get_boundaries(c, &min_rate, &max_rate); 3258 3259 /* This should be JSON format, i.e. elements separated with a comma */ 3260 seq_printf(s, "\"%s\": { ", c->name); 3261 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 3262 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 3263 seq_printf(s, "\"protect_count\": %d,", c->protect_count); 3264 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c)); 3265 seq_printf(s, "\"min_rate\": %lu,", min_rate); 3266 seq_printf(s, "\"max_rate\": %lu,", max_rate); 3267 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c)); 3268 phase = clk_core_get_phase(c); 3269 if (phase >= 0) 3270 seq_printf(s, "\"phase\": %d,", phase); 3271 seq_printf(s, "\"duty_cycle\": %u", 3272 clk_core_get_scaled_duty_cycle(c, 100000)); 3273 } 3274 3275 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) 3276 { 3277 struct clk_core *child; 3278 3279 clk_dump_one(s, c, level); 3280 3281 hlist_for_each_entry(child, &c->children, child_node) { 3282 seq_putc(s, ','); 3283 clk_dump_subtree(s, child, level + 1); 3284 } 3285 3286 seq_putc(s, '}'); 3287 } 3288 3289 static int clk_dump_show(struct seq_file *s, void *data) 3290 { 3291 struct clk_core *c; 3292 bool first_node = true; 3293 struct hlist_head **lists = s->private; 3294 3295 seq_putc(s, '{'); 3296 clk_prepare_lock(); 3297 3298 for (; *lists; lists++) { 3299 hlist_for_each_entry(c, *lists, child_node) { 3300 if (!first_node) 3301 seq_putc(s, ','); 3302 first_node = false; 3303 clk_dump_subtree(s, c, 0); 3304 } 3305 } 3306 3307 clk_prepare_unlock(); 3308 3309 seq_puts(s, "}\n"); 3310 return 0; 3311 } 3312 DEFINE_SHOW_ATTRIBUTE(clk_dump); 3313 3314 #undef CLOCK_ALLOW_WRITE_DEBUGFS 3315 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3316 /* 3317 * This can be dangerous, therefore don't provide any real compile time 3318 * configuration option for this feature. 3319 * People who want to use this will need to modify the source code directly. 
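* * Concretely, that means replacing the #undef above with: * * #define CLOCK_ALLOW_WRITE_DEBUGFS * * which makes clk_rate writable and adds a clk_prepare_enable file (plus a * writable clk_parent for muxes) for each clock under /sys/kernel/debug/clk/.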
3320 */ 3321 static int clk_rate_set(void *data, u64 val) 3322 { 3323 struct clk_core *core = data; 3324 int ret; 3325 3326 clk_prepare_lock(); 3327 ret = clk_core_set_rate_nolock(core, val); 3328 clk_prepare_unlock(); 3329 3330 return ret; 3331 } 3332 3333 #define clk_rate_mode 0644 3334 3335 static int clk_prepare_enable_set(void *data, u64 val) 3336 { 3337 struct clk_core *core = data; 3338 int ret = 0; 3339 3340 if (val) 3341 ret = clk_prepare_enable(core->hw->clk); 3342 else 3343 clk_disable_unprepare(core->hw->clk); 3344 3345 return ret; 3346 } 3347 3348 static int clk_prepare_enable_get(void *data, u64 *val) 3349 { 3350 struct clk_core *core = data; 3351 3352 *val = core->enable_count && core->prepare_count; 3353 return 0; 3354 } 3355 3356 DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get, 3357 clk_prepare_enable_set, "%llu\n"); 3358 3359 #else 3360 #define clk_rate_set NULL 3361 #define clk_rate_mode 0444 3362 #endif 3363 3364 static int clk_rate_get(void *data, u64 *val) 3365 { 3366 struct clk_core *core = data; 3367 3368 clk_prepare_lock(); 3369 *val = clk_core_get_rate_recalc(core); 3370 clk_prepare_unlock(); 3371 3372 return 0; 3373 } 3374 3375 DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n"); 3376 3377 static const struct { 3378 unsigned long flag; 3379 const char *name; 3380 } clk_flags[] = { 3381 #define ENTRY(f) { f, #f } 3382 ENTRY(CLK_SET_RATE_GATE), 3383 ENTRY(CLK_SET_PARENT_GATE), 3384 ENTRY(CLK_SET_RATE_PARENT), 3385 ENTRY(CLK_IGNORE_UNUSED), 3386 ENTRY(CLK_GET_RATE_NOCACHE), 3387 ENTRY(CLK_SET_RATE_NO_REPARENT), 3388 ENTRY(CLK_GET_ACCURACY_NOCACHE), 3389 ENTRY(CLK_RECALC_NEW_RATES), 3390 ENTRY(CLK_SET_RATE_UNGATE), 3391 ENTRY(CLK_IS_CRITICAL), 3392 ENTRY(CLK_OPS_PARENT_ENABLE), 3393 ENTRY(CLK_DUTY_CYCLE_PARENT), 3394 #undef ENTRY 3395 }; 3396 3397 static int clk_flags_show(struct seq_file *s, void *data) 3398 { 3399 struct clk_core *core = s->private; 3400 unsigned long flags = core->flags; 3401 unsigned int i; 3402 3403 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { 3404 if (flags & clk_flags[i].flag) { 3405 seq_printf(s, "%s\n", clk_flags[i].name); 3406 flags &= ~clk_flags[i].flag; 3407 } 3408 } 3409 if (flags) { 3410 /* Unknown flags */ 3411 seq_printf(s, "0x%lx\n", flags); 3412 } 3413 3414 return 0; 3415 } 3416 DEFINE_SHOW_ATTRIBUTE(clk_flags); 3417 3418 static void possible_parent_show(struct seq_file *s, struct clk_core *core, 3419 unsigned int i, char terminator) 3420 { 3421 struct clk_core *parent; 3422 const char *name = NULL; 3423 3424 /* 3425 * Go through the following options to fetch a parent's name. 3426 * 3427 * 1. Fetch the registered parent clock and use its name 3428 * 2. Use the global (fallback) name if specified 3429 * 3. Use the local fw_name if provided 3430 * 4. Fetch parent clock's clock-output-name if DT index was set 3431 * 3432 * This may still fail in some cases, such as when the parent is 3433 * specified directly via a struct clk_hw pointer, but it isn't 3434 * registered (yet). 
3435 */ 3436 parent = clk_core_get_parent_by_index(core, i); 3437 if (parent) { 3438 seq_puts(s, parent->name); 3439 } else if (core->parents[i].name) { 3440 seq_puts(s, core->parents[i].name); 3441 } else if (core->parents[i].fw_name) { 3442 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name); 3443 } else { 3444 if (core->parents[i].index >= 0) 3445 name = of_clk_get_parent_name(core->of_node, core->parents[i].index); 3446 if (!name) 3447 name = "(missing)"; 3448 3449 seq_puts(s, name); 3450 } 3451 3452 seq_putc(s, terminator); 3453 } 3454 3455 static int possible_parents_show(struct seq_file *s, void *data) 3456 { 3457 struct clk_core *core = s->private; 3458 int i; 3459 3460 for (i = 0; i < core->num_parents - 1; i++) 3461 possible_parent_show(s, core, i, ' '); 3462 3463 possible_parent_show(s, core, i, '\n'); 3464 3465 return 0; 3466 } 3467 DEFINE_SHOW_ATTRIBUTE(possible_parents); 3468 3469 static int current_parent_show(struct seq_file *s, void *data) 3470 { 3471 struct clk_core *core = s->private; 3472 3473 if (core->parent) 3474 seq_printf(s, "%s\n", core->parent->name); 3475 3476 return 0; 3477 } 3478 DEFINE_SHOW_ATTRIBUTE(current_parent); 3479 3480 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3481 static ssize_t current_parent_write(struct file *file, const char __user *ubuf, 3482 size_t count, loff_t *ppos) 3483 { 3484 struct seq_file *s = file->private_data; 3485 struct clk_core *core = s->private; 3486 struct clk_core *parent; 3487 u8 idx; 3488 int err; 3489 3490 err = kstrtou8_from_user(ubuf, count, 0, &idx); 3491 if (err < 0) 3492 return err; 3493 3494 parent = clk_core_get_parent_by_index(core, idx); 3495 if (!parent) 3496 return -ENOENT; 3497 3498 clk_prepare_lock(); 3499 err = clk_core_set_parent_nolock(core, parent); 3500 clk_prepare_unlock(); 3501 if (err) 3502 return err; 3503 3504 return count; 3505 } 3506 3507 static const struct file_operations current_parent_rw_fops = { 3508 .open = current_parent_open, 3509 .write = current_parent_write, 3510 .read = seq_read, 3511 .llseek = seq_lseek, 3512 .release = single_release, 3513 }; 3514 #endif 3515 3516 static int clk_duty_cycle_show(struct seq_file *s, void *data) 3517 { 3518 struct clk_core *core = s->private; 3519 struct clk_duty *duty = &core->duty; 3520 3521 seq_printf(s, "%u/%u\n", duty->num, duty->den); 3522 3523 return 0; 3524 } 3525 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle); 3526 3527 static int clk_min_rate_show(struct seq_file *s, void *data) 3528 { 3529 struct clk_core *core = s->private; 3530 unsigned long min_rate, max_rate; 3531 3532 clk_prepare_lock(); 3533 clk_core_get_boundaries(core, &min_rate, &max_rate); 3534 clk_prepare_unlock(); 3535 seq_printf(s, "%lu\n", min_rate); 3536 3537 return 0; 3538 } 3539 DEFINE_SHOW_ATTRIBUTE(clk_min_rate); 3540 3541 static int clk_max_rate_show(struct seq_file *s, void *data) 3542 { 3543 struct clk_core *core = s->private; 3544 unsigned long min_rate, max_rate; 3545 3546 clk_prepare_lock(); 3547 clk_core_get_boundaries(core, &min_rate, &max_rate); 3548 clk_prepare_unlock(); 3549 seq_printf(s, "%lu\n", max_rate); 3550 3551 return 0; 3552 } 3553 DEFINE_SHOW_ATTRIBUTE(clk_max_rate); 3554 3555 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) 3556 { 3557 struct dentry *root; 3558 3559 if (!core || !pdentry) 3560 return; 3561 3562 root = debugfs_create_dir(core->name, pdentry); 3563 core->dentry = root; 3564 3565 debugfs_create_file("clk_rate", clk_rate_mode, root, core, 3566 &clk_rate_fops); 3567 debugfs_create_file("clk_min_rate", 0444, root, core, 
&clk_min_rate_fops); 3568 debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops); 3569 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); 3570 debugfs_create_u32("clk_phase", 0444, root, &core->phase); 3571 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); 3572 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); 3573 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); 3574 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); 3575 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); 3576 debugfs_create_file("clk_duty_cycle", 0444, root, core, 3577 &clk_duty_cycle_fops); 3578 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3579 debugfs_create_file("clk_prepare_enable", 0644, root, core, 3580 &clk_prepare_enable_fops); 3581 3582 if (core->num_parents > 1) 3583 debugfs_create_file("clk_parent", 0644, root, core, 3584 &current_parent_rw_fops); 3585 else 3586 #endif 3587 if (core->num_parents > 0) 3588 debugfs_create_file("clk_parent", 0444, root, core, 3589 &current_parent_fops); 3590 3591 if (core->num_parents > 1) 3592 debugfs_create_file("clk_possible_parents", 0444, root, core, 3593 &possible_parents_fops); 3594 3595 if (core->ops->debug_init) 3596 core->ops->debug_init(core->hw, core->dentry); 3597 } 3598 3599 /** 3600 * clk_debug_register - add a clk node to the debugfs clk directory 3601 * @core: the clk being added to the debugfs clk directory 3602 * 3603 * Dynamically adds a clk to the debugfs clk directory if debugfs has been 3604 * initialized. Otherwise it bails out early since the debugfs clk directory 3605 * will be created lazily by clk_debug_init as part of a late_initcall. 3606 */ 3607 static void clk_debug_register(struct clk_core *core) 3608 { 3609 mutex_lock(&clk_debug_lock); 3610 hlist_add_head(&core->debug_node, &clk_debug_list); 3611 if (inited) 3612 clk_debug_create_one(core, rootdir); 3613 mutex_unlock(&clk_debug_lock); 3614 } 3615 3616 /** 3617 * clk_debug_unregister - remove a clk node from the debugfs clk directory 3618 * @core: the clk being removed from the debugfs clk directory 3619 * 3620 * Dynamically removes a clk and all its child nodes from the 3621 * debugfs clk directory if clk->dentry points to debugfs created by 3622 * clk_debug_register in __clk_core_init. 3623 */ 3624 static void clk_debug_unregister(struct clk_core *core) 3625 { 3626 mutex_lock(&clk_debug_lock); 3627 hlist_del_init(&core->debug_node); 3628 debugfs_remove_recursive(core->dentry); 3629 core->dentry = NULL; 3630 mutex_unlock(&clk_debug_lock); 3631 } 3632 3633 /** 3634 * clk_debug_init - lazily populate the debugfs clk directory 3635 * 3636 * clks are often initialized very early during boot before memory can be 3637 * dynamically allocated and well before debugfs is set up. This function 3638 * populates the debugfs clk directory once at boot-time when we know that 3639 * debugfs is set up. It should only be called once at boot-time; all other 3640 * clks added dynamically will be registered with clk_debug_register.
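* * Once populated, the resulting tree can be inspected from userspace, e.g. * (assuming debugfs is mounted at the usual location): * * cat /sys/kernel/debug/clk/clk_summary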
3641 */ 3642 static int __init clk_debug_init(void) 3643 { 3644 struct clk_core *core; 3645 3646 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3647 pr_warn("\n"); 3648 pr_warn("********************************************************************\n"); 3649 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3650 pr_warn("** **\n"); 3651 pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n"); 3652 pr_warn("** **\n"); 3653 pr_warn("** This means that this kernel is built to expose clk operations **\n"); 3654 pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n"); 3655 pr_warn("** to userspace, which may compromise security on your system. **\n"); 3656 pr_warn("** **\n"); 3657 pr_warn("** If you see this message and you are not debugging the **\n"); 3658 pr_warn("** kernel, report this immediately to your vendor! **\n"); 3659 pr_warn("** **\n"); 3660 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3661 pr_warn("********************************************************************\n"); 3662 #endif 3663 3664 rootdir = debugfs_create_dir("clk", NULL); 3665 3666 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, 3667 &clk_summary_fops); 3668 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, 3669 &clk_dump_fops); 3670 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list, 3671 &clk_summary_fops); 3672 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list, 3673 &clk_dump_fops); 3674 3675 mutex_lock(&clk_debug_lock); 3676 hlist_for_each_entry(core, &clk_debug_list, debug_node) 3677 clk_debug_create_one(core, rootdir); 3678 3679 inited = 1; 3680 mutex_unlock(&clk_debug_lock); 3681 3682 return 0; 3683 } 3684 late_initcall(clk_debug_init); 3685 #else 3686 static inline void clk_debug_register(struct clk_core *core) { } 3687 static inline void clk_debug_unregister(struct clk_core *core) 3688 { 3689 } 3690 #endif 3691 3692 static void clk_core_reparent_orphans_nolock(void) 3693 { 3694 struct clk_core *orphan; 3695 struct hlist_node *tmp2; 3696 3697 /* 3698 * walk the list of orphan clocks and reparent any that have newly 3699 * found a parent. 3700 */ 3701 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 3702 struct clk_core *parent = __clk_init_parent(orphan); 3703 3704 /* 3705 * We need to use __clk_set_parent_before() and _after() to 3706 * properly migrate any prepare/enable count of the orphan 3707 * clock. This is important for CLK_IS_CRITICAL clocks, which 3708 * are enabled during init but might not have a parent yet. 3709 */ 3710 if (parent) { 3711 /* update the clk tree topology */ 3712 __clk_set_parent_before(orphan, parent); 3713 __clk_set_parent_after(orphan, parent, NULL); 3714 __clk_recalc_accuracies(orphan); 3715 __clk_recalc_rates(orphan, true, 0); 3716 3717 /* 3718 * __clk_init_parent() will set the initial req_rate to 3719 * 0 if the clock doesn't have clk_ops::recalc_rate and 3720 * is an orphan when it's registered. 3721 * 3722 * 'req_rate' is used by clk_set_rate_range() and 3723 * clk_put() to trigger a clk_set_rate() call whenever 3724 * the boundaries are modified. Let's make sure 3725 * 'req_rate' is set to something non-zero so that 3726 * clk_set_rate_range() doesn't drop the frequency.
3727 */ 3728 orphan->req_rate = orphan->rate; 3729 } 3730 } 3731 } 3732 3733 /** 3734 * __clk_core_init - initialize the data structures in a struct clk_core 3735 * @core: clk_core being initialized 3736 * 3737 * Initializes the lists in struct clk_core, queries the hardware for the 3738 * parent and rate and sets them both. 3739 */ 3740 static int __clk_core_init(struct clk_core *core) 3741 { 3742 int ret; 3743 struct clk_core *parent; 3744 unsigned long rate; 3745 int phase; 3746 3747 clk_prepare_lock(); 3748 3749 /* 3750 * Set hw->core after grabbing the prepare_lock to synchronize with 3751 * callers of clk_core_fill_parent_index() where we treat hw->core 3752 * being NULL as the clk not being registered yet. This is crucial so 3753 * that clks aren't parented until their parent is fully registered. 3754 */ 3755 core->hw->core = core; 3756 3757 ret = clk_pm_runtime_get(core); 3758 if (ret) 3759 goto unlock; 3760 3761 /* check to see if a clock with this name is already registered */ 3762 if (clk_core_lookup(core->name)) { 3763 pr_debug("%s: clk %s already initialized\n", 3764 __func__, core->name); 3765 ret = -EEXIST; 3766 goto out; 3767 } 3768 3769 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */ 3770 if (core->ops->set_rate && 3771 !((core->ops->round_rate || core->ops->determine_rate) && 3772 core->ops->recalc_rate)) { 3773 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", 3774 __func__, core->name); 3775 ret = -EINVAL; 3776 goto out; 3777 } 3778 3779 if (core->ops->set_parent && !core->ops->get_parent) { 3780 pr_err("%s: %s must implement .get_parent & .set_parent\n", 3781 __func__, core->name); 3782 ret = -EINVAL; 3783 goto out; 3784 } 3785 3786 if (core->ops->set_parent && !core->ops->determine_rate) { 3787 pr_err("%s: %s must implement .set_parent & .determine_rate\n", 3788 __func__, core->name); 3789 ret = -EINVAL; 3790 goto out; 3791 } 3792 3793 if (core->num_parents > 1 && !core->ops->get_parent) { 3794 pr_err("%s: %s must implement .get_parent as it has multiple parents\n", 3795 __func__, core->name); 3796 ret = -EINVAL; 3797 goto out; 3798 } 3799 3800 if (core->ops->set_rate_and_parent && 3801 !(core->ops->set_parent && core->ops->set_rate)) { 3802 pr_err("%s: %s must implement .set_parent & .set_rate\n", 3803 __func__, core->name); 3804 ret = -EINVAL; 3805 goto out; 3806 } 3807 3808 /* 3809 * optional platform-specific magic 3810 * 3811 * The .init callback is not used by any of the basic clock types, but 3812 * exists for weird hardware that must perform initialization magic for 3813 * CCF to get an accurate view of the clock for any other callbacks. It 3814 * may also be used when a clock needs to perform dynamic allocations; 3815 * such allocations must be freed in the .terminate() callback. 3816 * This callback shall not be used to initialize the parameters state, 3817 * such as rate, parent, etc ... 3818 * 3819 * If it exists, this callback shall be called before any other callback 3820 * of the clock. 3821 */ 3822 if (core->ops->init) { 3823 ret = core->ops->init(core->hw); 3824 if (ret) 3825 goto out; 3826 } 3827 3828 parent = core->parent = __clk_init_parent(core); 3829 3830 /* 3831 * Populate core->parent if parent has already been clk_core_init'd. If 3832 * parent has not yet been clk_core_init'd then place clk in the orphan 3833 * list. If clk doesn't have any parents then place it in the root 3834 * clk list.
3835 * 3836 * Every time a new clk is clk_init'd then we walk the list of orphan 3837 * clocks and re-parent any that are children of the clock currently 3838 * being clk_init'd. 3839 */ 3840 if (parent) { 3841 hlist_add_head(&core->child_node, &parent->children); 3842 core->orphan = parent->orphan; 3843 } else if (!core->num_parents) { 3844 hlist_add_head(&core->child_node, &clk_root_list); 3845 core->orphan = false; 3846 } else { 3847 hlist_add_head(&core->child_node, &clk_orphan_list); 3848 core->orphan = true; 3849 } 3850 3851 /* 3852 * Set clk's accuracy. The preferred method is to use 3853 * .recalc_accuracy. For simple clocks and lazy developers the default 3854 * fallback is to use the parent's accuracy. If a clock doesn't have a 3855 * parent (or is orphaned) then accuracy is set to zero (perfect 3856 * clock). 3857 */ 3858 if (core->ops->recalc_accuracy) 3859 core->accuracy = core->ops->recalc_accuracy(core->hw, 3860 clk_core_get_accuracy_no_lock(parent)); 3861 else if (parent) 3862 core->accuracy = parent->accuracy; 3863 else 3864 core->accuracy = 0; 3865 3866 /* 3867 * Set clk's phase by clk_core_get_phase() caching the phase. 3868 * Since a phase is by definition relative to its parent, just 3869 * query the current clock phase, or just assume it's in phase. 3870 */ 3871 phase = clk_core_get_phase(core); 3872 if (phase < 0) { 3873 ret = phase; 3874 pr_warn("%s: Failed to get phase for clk '%s'\n", __func__, 3875 core->name); 3876 goto out; 3877 } 3878 3879 /* 3880 * Set clk's duty cycle. 3881 */ 3882 clk_core_update_duty_cycle_nolock(core); 3883 3884 /* 3885 * Set clk's rate. The preferred method is to use .recalc_rate. For 3886 * simple clocks and lazy developers the default fallback is to use the 3887 * parent's rate. If a clock doesn't have a parent (or is orphaned) 3888 * then rate is set to zero. 
3889 */ 3890 if (core->ops->recalc_rate) 3891 rate = core->ops->recalc_rate(core->hw, 3892 clk_core_get_rate_nolock(parent)); 3893 else if (parent) 3894 rate = parent->rate; 3895 else 3896 rate = 0; 3897 core->rate = core->req_rate = rate; 3898 3899 /* 3900 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks 3901 * don't get accidentally disabled when walking the orphan tree and 3902 * reparenting clocks 3903 */ 3904 if (core->flags & CLK_IS_CRITICAL) { 3905 ret = clk_core_prepare(core); 3906 if (ret) { 3907 pr_warn("%s: critical clk '%s' failed to prepare\n", 3908 __func__, core->name); 3909 goto out; 3910 } 3911 3912 ret = clk_core_enable_lock(core); 3913 if (ret) { 3914 pr_warn("%s: critical clk '%s' failed to enable\n", 3915 __func__, core->name); 3916 clk_core_unprepare(core); 3917 goto out; 3918 } 3919 } 3920 3921 clk_core_reparent_orphans_nolock(); 3922 3923 kref_init(&core->ref); 3924 out: 3925 clk_pm_runtime_put(core); 3926 unlock: 3927 if (ret) { 3928 hlist_del_init(&core->child_node); 3929 core->hw->core = NULL; 3930 } 3931 3932 clk_prepare_unlock(); 3933 3934 if (!ret) 3935 clk_debug_register(core); 3936 3937 return ret; 3938 } 3939 3940 /** 3941 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core 3942 * @core: clk to add consumer to 3943 * @clk: consumer to link to a clk 3944 */ 3945 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk) 3946 { 3947 clk_prepare_lock(); 3948 hlist_add_head(&clk->clks_node, &core->clks); 3949 clk_prepare_unlock(); 3950 } 3951 3952 /** 3953 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core 3954 * @clk: consumer to unlink 3955 */ 3956 static void clk_core_unlink_consumer(struct clk *clk) 3957 { 3958 lockdep_assert_held(&prepare_lock); 3959 hlist_del(&clk->clks_node); 3960 } 3961 3962 /** 3963 * alloc_clk - Allocate a clk consumer, but leave it unlinked from the clk_core 3964 * @core: clk to allocate a consumer for 3965 * @dev_id: string describing device name 3966 * @con_id: connection ID string on device 3967 * 3968 * Returns: clk consumer left unlinked from the consumer list 3969 */ 3970 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id, 3971 const char *con_id) 3972 { 3973 struct clk *clk; 3974 3975 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 3976 if (!clk) 3977 return ERR_PTR(-ENOMEM); 3978 3979 clk->core = core; 3980 clk->dev_id = dev_id; 3981 clk->con_id = kstrdup_const(con_id, GFP_KERNEL); 3982 clk->max_rate = ULONG_MAX; 3983 3984 return clk; 3985 } 3986 3987 /** 3988 * free_clk - Free a clk consumer 3989 * @clk: clk consumer to free 3990 * 3991 * Note, this assumes the clk has been unlinked from the clk_core consumer 3992 * list. 3993 */ 3994 static void free_clk(struct clk *clk) 3995 { 3996 kfree_const(clk->con_id); 3997 kfree(clk); 3998 } 3999 4000 /** 4001 * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given 4002 * a clk_hw 4003 * @dev: clk consumer device 4004 * @hw: clk_hw associated with the clk being consumed 4005 * @dev_id: string describing device name 4006 * @con_id: connection ID string on device 4007 * 4008 * This is the main function used to create a clk pointer for use by clk 4009 * consumers. It connects a consumer to the clk_core and clk_hw structures 4010 * used by the framework and clk provider respectively.
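* * Returns: a valid clk consumer handle, or an ERR_PTR on failure; an ERR_PTR * (or NULL) @hw is passed through unchanged so that calls to this function * can be chained.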
4011  */
4012 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
4013 			      const char *dev_id, const char *con_id)
4014 {
4015 	struct clk *clk;
4016 	struct clk_core *core;
4017 
4018 	/* This is to allow this function to be chained to others */
4019 	if (IS_ERR_OR_NULL(hw))
4020 		return ERR_CAST(hw);
4021 
4022 	core = hw->core;
4023 	clk = alloc_clk(core, dev_id, con_id);
4024 	if (IS_ERR(clk))
4025 		return clk;
4026 	clk->dev = dev;
4027 
4028 	if (!try_module_get(core->owner)) {
4029 		free_clk(clk);
4030 		return ERR_PTR(-ENOENT);
4031 	}
4032 
4033 	kref_get(&core->ref);
4034 	clk_core_link_consumer(core, clk);
4035 
4036 	return clk;
4037 }
4038 
4039 /**
4040  * clk_hw_get_clk - get a clk consumer given a clk_hw
4041  * @hw: clk_hw associated with the clk being consumed
4042  * @con_id: connection ID string on device
4043  *
4044  * Returns: new clk consumer
4045  * This is the function to be used by providers which need
4046  * to get a consumer clk and act on the clock element.
4047  * Calls to this function must be balanced with calls to clk_put().
4048  */
4049 struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
4050 {
4051 	struct device *dev = hw->core->dev;
4052 	const char *name = dev ? dev_name(dev) : NULL;
4053 
4054 	return clk_hw_create_clk(dev, hw, name, con_id);
4055 }
4056 EXPORT_SYMBOL(clk_hw_get_clk);
4057 
4058 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
4059 {
4060 	const char *dst;
4061 
4062 	if (!src) {
4063 		if (must_exist)
4064 			return -EINVAL;
4065 		return 0;
4066 	}
4067 
4068 	*dst_p = dst = kstrdup_const(src, GFP_KERNEL);
4069 	if (!dst)
4070 		return -ENOMEM;
4071 
4072 	return 0;
4073 }
4074 
4075 static int clk_core_populate_parent_map(struct clk_core *core,
4076 					const struct clk_init_data *init)
4077 {
4078 	u8 num_parents = init->num_parents;
4079 	const char * const *parent_names = init->parent_names;
4080 	const struct clk_hw **parent_hws = init->parent_hws;
4081 	const struct clk_parent_data *parent_data = init->parent_data;
4082 	int i, ret = 0;
4083 	struct clk_parent_map *parents, *parent;
4084 
4085 	if (!num_parents)
4086 		return 0;
4087 
4088 	/*
4089 	 * Avoid unnecessary string look-ups of clk_core's possible parents by
4090 	 * having a cache of names/clk_hw pointers to clk_core pointers.
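 *
 * For illustration only, a hypothetical provider might describe its
 * possible parents with entries like:
 *
 *	static const struct clk_parent_data foo_parents[] = {
 *		{ .fw_name = "xtal" },		// resolved via DT clock-names
 *		{ .hw = &foo_pll_hw },		// direct clk_hw reference
 *	};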
4091 */ 4092 parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL); 4093 core->parents = parents; 4094 if (!parents) 4095 return -ENOMEM; 4096 4097 /* Copy everything over because it might be __initdata */ 4098 for (i = 0, parent = parents; i < num_parents; i++, parent++) { 4099 parent->index = -1; 4100 if (parent_names) { 4101 /* throw a WARN if any entries are NULL */ 4102 WARN(!parent_names[i], 4103 "%s: invalid NULL in %s's .parent_names\n", 4104 __func__, core->name); 4105 ret = clk_cpy_name(&parent->name, parent_names[i], 4106 true); 4107 } else if (parent_data) { 4108 parent->hw = parent_data[i].hw; 4109 parent->index = parent_data[i].index; 4110 ret = clk_cpy_name(&parent->fw_name, 4111 parent_data[i].fw_name, false); 4112 if (!ret) 4113 ret = clk_cpy_name(&parent->name, 4114 parent_data[i].name, 4115 false); 4116 } else if (parent_hws) { 4117 parent->hw = parent_hws[i]; 4118 } else { 4119 ret = -EINVAL; 4120 WARN(1, "Must specify parents if num_parents > 0\n"); 4121 } 4122 4123 if (ret) { 4124 do { 4125 kfree_const(parents[i].name); 4126 kfree_const(parents[i].fw_name); 4127 } while (--i >= 0); 4128 kfree(parents); 4129 4130 return ret; 4131 } 4132 } 4133 4134 return 0; 4135 } 4136 4137 static void clk_core_free_parent_map(struct clk_core *core) 4138 { 4139 int i = core->num_parents; 4140 4141 if (!core->num_parents) 4142 return; 4143 4144 while (--i >= 0) { 4145 kfree_const(core->parents[i].name); 4146 kfree_const(core->parents[i].fw_name); 4147 } 4148 4149 kfree(core->parents); 4150 } 4151 4152 static struct clk * 4153 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw) 4154 { 4155 int ret; 4156 struct clk_core *core; 4157 const struct clk_init_data *init = hw->init; 4158 4159 /* 4160 * The init data is not supposed to be used outside of registration path. 4161 * Set it to NULL so that provider drivers can't use it either and so that 4162 * we catch use of hw->init early on in the core. 4163 */ 4164 hw->init = NULL; 4165 4166 core = kzalloc(sizeof(*core), GFP_KERNEL); 4167 if (!core) { 4168 ret = -ENOMEM; 4169 goto fail_out; 4170 } 4171 4172 core->name = kstrdup_const(init->name, GFP_KERNEL); 4173 if (!core->name) { 4174 ret = -ENOMEM; 4175 goto fail_name; 4176 } 4177 4178 if (WARN_ON(!init->ops)) { 4179 ret = -EINVAL; 4180 goto fail_ops; 4181 } 4182 core->ops = init->ops; 4183 4184 if (dev && pm_runtime_enabled(dev)) 4185 core->rpm_enabled = true; 4186 core->dev = dev; 4187 core->of_node = np; 4188 if (dev && dev->driver) 4189 core->owner = dev->driver->owner; 4190 core->hw = hw; 4191 core->flags = init->flags; 4192 core->num_parents = init->num_parents; 4193 core->min_rate = 0; 4194 core->max_rate = ULONG_MAX; 4195 4196 ret = clk_core_populate_parent_map(core, init); 4197 if (ret) 4198 goto fail_parents; 4199 4200 INIT_HLIST_HEAD(&core->clks); 4201 4202 /* 4203 * Don't call clk_hw_create_clk() here because that would pin the 4204 * provider module to itself and prevent it from ever being removed. 
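 *
 * (clk_hw_create_clk() would take a module reference on core->owner via
 * try_module_get(), and a provider holding a reference on its own module
 * could never be unloaded.)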
4205  */
4206 	hw->clk = alloc_clk(core, NULL, NULL);
4207 	if (IS_ERR(hw->clk)) {
4208 		ret = PTR_ERR(hw->clk);
4209 		goto fail_create_clk;
4210 	}
4211 
4212 	clk_core_link_consumer(core, hw->clk);
4213 
4214 	ret = __clk_core_init(core);
4215 	if (!ret)
4216 		return hw->clk;
4217 
4218 	clk_prepare_lock();
4219 	clk_core_unlink_consumer(hw->clk);
4220 	clk_prepare_unlock();
4221 
4222 	free_clk(hw->clk);
4223 	hw->clk = NULL;
4224 
4225 fail_create_clk:
4226 	clk_core_free_parent_map(core);
4227 fail_parents:
4228 fail_ops:
4229 	kfree_const(core->name);
4230 fail_name:
4231 	kfree(core);
4232 fail_out:
4233 	return ERR_PTR(ret);
4234 }
4235 
4236 /**
4237  * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
4238  * @dev: Device to get device node of
4239  *
4240  * Return: device node pointer of @dev, or the device node pointer of
4241  * @dev->parent if @dev doesn't have a device node, or NULL if neither
4242  * @dev nor @dev->parent has a device node.
4243  */
4244 static struct device_node *dev_or_parent_of_node(struct device *dev)
4245 {
4246 	struct device_node *np;
4247 
4248 	if (!dev)
4249 		return NULL;
4250 
4251 	np = dev_of_node(dev);
4252 	if (!np)
4253 		np = dev_of_node(dev->parent);
4254 
4255 	return np;
4256 }
4257 
4258 /**
4259  * clk_register - allocate a new clock, register it and return an opaque cookie
4260  * @dev: device that is registering this clock
4261  * @hw: link to hardware-specific clock data
4262  *
4263  * clk_register is the *deprecated* interface for populating the clock tree with
4264  * new clock nodes. Use clk_hw_register() instead.
4265  *
4266  * Returns: a pointer to the newly allocated struct clk which
4267  * cannot be dereferenced by driver code but may be used in conjunction with the
4268  * rest of the clock API. In the event of an error clk_register will return an
4269  * error pointer; drivers must test for it with IS_ERR() after calling
4270  * clk_register().
4271  */
4272 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
4273 {
4274 	return __clk_register(dev, dev_or_parent_of_node(dev), hw);
4275 }
4276 EXPORT_SYMBOL_GPL(clk_register);
4277 
4278 /**
4279  * clk_hw_register - register a clk_hw and return an error code
4280  * @dev: device that is registering this clock
4281  * @hw: link to hardware-specific clock data
4282  *
4283  * clk_hw_register is the primary interface for populating the clock tree with
4284  * new clock nodes. It returns an integer equal to zero indicating success or
4285  * less than zero indicating failure. Drivers must test for an error code after
4286  * calling clk_hw_register().
4287  */
4288 int clk_hw_register(struct device *dev, struct clk_hw *hw)
4289 {
4290 	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
4291 			       hw));
4292 }
4293 EXPORT_SYMBOL_GPL(clk_hw_register);
4294 
4295 /**
4296  * of_clk_hw_register - register a clk_hw and return an error code
4297  * @node: device_node of device that is registering this clock
4298  * @hw: link to hardware-specific clock data
4299  *
4300  * of_clk_hw_register() is the primary interface for populating the clock tree
4301  * with new clock nodes when a struct device is not available, but a struct
4302  * device_node is. It returns an integer equal to zero indicating success or
4303  * less than zero indicating failure. Drivers must test for an error code after
 * calling of_clk_hw_register().
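 *
 * For illustration only (hypothetical early provider that only has a
 * device_node):
 *
 *	ret = of_clk_hw_register(np, &foo->hw);
 *	if (ret)
 *		return ret;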
4304 */ 4305 int of_clk_hw_register(struct device_node *node, struct clk_hw *hw) 4306 { 4307 return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw)); 4308 } 4309 EXPORT_SYMBOL_GPL(of_clk_hw_register); 4310 4311 /* Free memory allocated for a clock. */ 4312 static void __clk_release(struct kref *ref) 4313 { 4314 struct clk_core *core = container_of(ref, struct clk_core, ref); 4315 4316 lockdep_assert_held(&prepare_lock); 4317 4318 clk_core_free_parent_map(core); 4319 kfree_const(core->name); 4320 kfree(core); 4321 } 4322 4323 /* 4324 * Empty clk_ops for unregistered clocks. These are used temporarily 4325 * after clk_unregister() was called on a clock and until last clock 4326 * consumer calls clk_put() and the struct clk object is freed. 4327 */ 4328 static int clk_nodrv_prepare_enable(struct clk_hw *hw) 4329 { 4330 return -ENXIO; 4331 } 4332 4333 static void clk_nodrv_disable_unprepare(struct clk_hw *hw) 4334 { 4335 WARN_ON_ONCE(1); 4336 } 4337 4338 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, 4339 unsigned long parent_rate) 4340 { 4341 return -ENXIO; 4342 } 4343 4344 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) 4345 { 4346 return -ENXIO; 4347 } 4348 4349 static int clk_nodrv_determine_rate(struct clk_hw *hw, 4350 struct clk_rate_request *req) 4351 { 4352 return -ENXIO; 4353 } 4354 4355 static const struct clk_ops clk_nodrv_ops = { 4356 .enable = clk_nodrv_prepare_enable, 4357 .disable = clk_nodrv_disable_unprepare, 4358 .prepare = clk_nodrv_prepare_enable, 4359 .unprepare = clk_nodrv_disable_unprepare, 4360 .determine_rate = clk_nodrv_determine_rate, 4361 .set_rate = clk_nodrv_set_rate, 4362 .set_parent = clk_nodrv_set_parent, 4363 }; 4364 4365 static void clk_core_evict_parent_cache_subtree(struct clk_core *root, 4366 const struct clk_core *target) 4367 { 4368 int i; 4369 struct clk_core *child; 4370 4371 for (i = 0; i < root->num_parents; i++) 4372 if (root->parents[i].core == target) 4373 root->parents[i].core = NULL; 4374 4375 hlist_for_each_entry(child, &root->children, child_node) 4376 clk_core_evict_parent_cache_subtree(child, target); 4377 } 4378 4379 /* Remove this clk from all parent caches */ 4380 static void clk_core_evict_parent_cache(struct clk_core *core) 4381 { 4382 const struct hlist_head **lists; 4383 struct clk_core *root; 4384 4385 lockdep_assert_held(&prepare_lock); 4386 4387 for (lists = all_lists; *lists; lists++) 4388 hlist_for_each_entry(root, *lists, child_node) 4389 clk_core_evict_parent_cache_subtree(root, core); 4390 4391 } 4392 4393 /** 4394 * clk_unregister - unregister a currently registered clock 4395 * @clk: clock to unregister 4396 */ 4397 void clk_unregister(struct clk *clk) 4398 { 4399 unsigned long flags; 4400 const struct clk_ops *ops; 4401 4402 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 4403 return; 4404 4405 clk_debug_unregister(clk->core); 4406 4407 clk_prepare_lock(); 4408 4409 ops = clk->core->ops; 4410 if (ops == &clk_nodrv_ops) { 4411 pr_err("%s: unregistered clock: %s\n", __func__, 4412 clk->core->name); 4413 goto unlock; 4414 } 4415 /* 4416 * Assign empty clock ops for consumers that might still hold 4417 * a reference to this clock. 4418 */ 4419 flags = clk_enable_lock(); 4420 clk->core->ops = &clk_nodrv_ops; 4421 clk_enable_unlock(flags); 4422 4423 if (ops->terminate) 4424 ops->terminate(clk->core->hw); 4425 4426 if (!hlist_empty(&clk->core->children)) { 4427 struct clk_core *child; 4428 struct hlist_node *t; 4429 4430 /* Reparent all children to the orphan list. 
*/ 4431 hlist_for_each_entry_safe(child, t, &clk->core->children, 4432 child_node) 4433 clk_core_set_parent_nolock(child, NULL); 4434 } 4435 4436 clk_core_evict_parent_cache(clk->core); 4437 4438 hlist_del_init(&clk->core->child_node); 4439 4440 if (clk->core->prepare_count) 4441 pr_warn("%s: unregistering prepared clock: %s\n", 4442 __func__, clk->core->name); 4443 4444 if (clk->core->protect_count) 4445 pr_warn("%s: unregistering protected clock: %s\n", 4446 __func__, clk->core->name); 4447 4448 kref_put(&clk->core->ref, __clk_release); 4449 free_clk(clk); 4450 unlock: 4451 clk_prepare_unlock(); 4452 } 4453 EXPORT_SYMBOL_GPL(clk_unregister); 4454 4455 /** 4456 * clk_hw_unregister - unregister a currently registered clk_hw 4457 * @hw: hardware-specific clock data to unregister 4458 */ 4459 void clk_hw_unregister(struct clk_hw *hw) 4460 { 4461 clk_unregister(hw->clk); 4462 } 4463 EXPORT_SYMBOL_GPL(clk_hw_unregister); 4464 4465 static void devm_clk_unregister_cb(struct device *dev, void *res) 4466 { 4467 clk_unregister(*(struct clk **)res); 4468 } 4469 4470 static void devm_clk_hw_unregister_cb(struct device *dev, void *res) 4471 { 4472 clk_hw_unregister(*(struct clk_hw **)res); 4473 } 4474 4475 /** 4476 * devm_clk_register - resource managed clk_register() 4477 * @dev: device that is registering this clock 4478 * @hw: link to hardware-specific clock data 4479 * 4480 * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead. 4481 * 4482 * Clocks returned from this function are automatically clk_unregister()ed on 4483 * driver detach. See clk_register() for more information. 4484 */ 4485 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 4486 { 4487 struct clk *clk; 4488 struct clk **clkp; 4489 4490 clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL); 4491 if (!clkp) 4492 return ERR_PTR(-ENOMEM); 4493 4494 clk = clk_register(dev, hw); 4495 if (!IS_ERR(clk)) { 4496 *clkp = clk; 4497 devres_add(dev, clkp); 4498 } else { 4499 devres_free(clkp); 4500 } 4501 4502 return clk; 4503 } 4504 EXPORT_SYMBOL_GPL(devm_clk_register); 4505 4506 /** 4507 * devm_clk_hw_register - resource managed clk_hw_register() 4508 * @dev: device that is registering this clock 4509 * @hw: link to hardware-specific clock data 4510 * 4511 * Managed clk_hw_register(). Clocks registered by this function are 4512 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() 4513 * for more information. 4514 */ 4515 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) 4516 { 4517 struct clk_hw **hwp; 4518 int ret; 4519 4520 hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL); 4521 if (!hwp) 4522 return -ENOMEM; 4523 4524 ret = clk_hw_register(dev, hw); 4525 if (!ret) { 4526 *hwp = hw; 4527 devres_add(dev, hwp); 4528 } else { 4529 devres_free(hwp); 4530 } 4531 4532 return ret; 4533 } 4534 EXPORT_SYMBOL_GPL(devm_clk_hw_register); 4535 4536 static void devm_clk_release(struct device *dev, void *res) 4537 { 4538 clk_put(*(struct clk **)res); 4539 } 4540 4541 /** 4542 * devm_clk_hw_get_clk - resource managed clk_hw_get_clk() 4543 * @dev: device that is registering this clock 4544 * @hw: clk_hw associated with the clk being consumed 4545 * @con_id: connection ID string on device 4546 * 4547 * Managed clk_hw_get_clk(). Clocks got with this function are 4548 * automatically clk_put() on driver detach. See clk_put() 4549 * for more information. 
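 *
 * A minimal sketch (hypothetical provider acting as a consumer of its
 * own clock; names are illustrative only):
 *
 *	clk = devm_clk_hw_get_clk(&pdev->dev, &foo->hw, "ref");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);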
4550 */ 4551 struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw, 4552 const char *con_id) 4553 { 4554 struct clk *clk; 4555 struct clk **clkp; 4556 4557 /* This should not happen because it would mean we have drivers 4558 * passing around clk_hw pointers instead of having the caller use 4559 * proper clk_get() style APIs 4560 */ 4561 WARN_ON_ONCE(dev != hw->core->dev); 4562 4563 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); 4564 if (!clkp) 4565 return ERR_PTR(-ENOMEM); 4566 4567 clk = clk_hw_get_clk(hw, con_id); 4568 if (!IS_ERR(clk)) { 4569 *clkp = clk; 4570 devres_add(dev, clkp); 4571 } else { 4572 devres_free(clkp); 4573 } 4574 4575 return clk; 4576 } 4577 EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk); 4578 4579 /* 4580 * clkdev helpers 4581 */ 4582 4583 void __clk_put(struct clk *clk) 4584 { 4585 struct module *owner; 4586 4587 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 4588 return; 4589 4590 clk_prepare_lock(); 4591 4592 /* 4593 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a 4594 * given user should be balanced with calls to clk_rate_exclusive_put() 4595 * and by that same consumer 4596 */ 4597 if (WARN_ON(clk->exclusive_count)) { 4598 /* We voiced our concern, let's sanitize the situation */ 4599 clk->core->protect_count -= (clk->exclusive_count - 1); 4600 clk_core_rate_unprotect(clk->core); 4601 clk->exclusive_count = 0; 4602 } 4603 4604 hlist_del(&clk->clks_node); 4605 4606 /* If we had any boundaries on that clock, let's drop them. */ 4607 if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX) 4608 clk_set_rate_range_nolock(clk, 0, ULONG_MAX); 4609 4610 owner = clk->core->owner; 4611 kref_put(&clk->core->ref, __clk_release); 4612 4613 clk_prepare_unlock(); 4614 4615 module_put(owner); 4616 4617 free_clk(clk); 4618 } 4619 4620 /*** clk rate change notifiers ***/ 4621 4622 /** 4623 * clk_notifier_register - add a clk rate change notifier 4624 * @clk: struct clk * to watch 4625 * @nb: struct notifier_block * with callback info 4626 * 4627 * Request notification when clk's rate changes. This uses an SRCU 4628 * notifier because we want it to block and notifier unregistrations are 4629 * uncommon. The callbacks associated with the notifier must not 4630 * re-enter into the clk framework by calling any top-level clk APIs; 4631 * this will cause a nested prepare_lock mutex. 4632 * 4633 * In all notification cases (pre, post and abort rate change) the original 4634 * clock rate is passed to the callback via struct clk_notifier_data.old_rate 4635 * and the new frequency is passed via struct clk_notifier_data.new_rate. 4636 * 4637 * clk_notifier_register() must be called from non-atomic context. 4638 * Returns -EINVAL if called with null arguments, -ENOMEM upon 4639 * allocation failure; otherwise, passes along the return value of 4640 * srcu_notifier_chain_register(). 
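 *
 * A minimal sketch of a callback (hypothetical consumer driver, for
 * illustration only):
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("rate: %lu -> %lu\n", ndata->old_rate,
 *				 ndata->new_rate);
 *
 *		return NOTIFY_OK;
 *	}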
4641 */ 4642 int clk_notifier_register(struct clk *clk, struct notifier_block *nb) 4643 { 4644 struct clk_notifier *cn; 4645 int ret = -ENOMEM; 4646 4647 if (!clk || !nb) 4648 return -EINVAL; 4649 4650 clk_prepare_lock(); 4651 4652 /* search the list of notifiers for this clk */ 4653 list_for_each_entry(cn, &clk_notifier_list, node) 4654 if (cn->clk == clk) 4655 goto found; 4656 4657 /* if clk wasn't in the notifier list, allocate new clk_notifier */ 4658 cn = kzalloc(sizeof(*cn), GFP_KERNEL); 4659 if (!cn) 4660 goto out; 4661 4662 cn->clk = clk; 4663 srcu_init_notifier_head(&cn->notifier_head); 4664 4665 list_add(&cn->node, &clk_notifier_list); 4666 4667 found: 4668 ret = srcu_notifier_chain_register(&cn->notifier_head, nb); 4669 4670 clk->core->notifier_count++; 4671 4672 out: 4673 clk_prepare_unlock(); 4674 4675 return ret; 4676 } 4677 EXPORT_SYMBOL_GPL(clk_notifier_register); 4678 4679 /** 4680 * clk_notifier_unregister - remove a clk rate change notifier 4681 * @clk: struct clk * 4682 * @nb: struct notifier_block * with callback info 4683 * 4684 * Request no further notification for changes to 'clk' and frees memory 4685 * allocated in clk_notifier_register. 4686 * 4687 * Returns -EINVAL if called with null arguments; otherwise, passes 4688 * along the return value of srcu_notifier_chain_unregister(). 4689 */ 4690 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) 4691 { 4692 struct clk_notifier *cn; 4693 int ret = -ENOENT; 4694 4695 if (!clk || !nb) 4696 return -EINVAL; 4697 4698 clk_prepare_lock(); 4699 4700 list_for_each_entry(cn, &clk_notifier_list, node) { 4701 if (cn->clk == clk) { 4702 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 4703 4704 clk->core->notifier_count--; 4705 4706 /* XXX the notifier code should handle this better */ 4707 if (!cn->notifier_head.head) { 4708 srcu_cleanup_notifier_head(&cn->notifier_head); 4709 list_del(&cn->node); 4710 kfree(cn); 4711 } 4712 break; 4713 } 4714 } 4715 4716 clk_prepare_unlock(); 4717 4718 return ret; 4719 } 4720 EXPORT_SYMBOL_GPL(clk_notifier_unregister); 4721 4722 struct clk_notifier_devres { 4723 struct clk *clk; 4724 struct notifier_block *nb; 4725 }; 4726 4727 static void devm_clk_notifier_release(struct device *dev, void *res) 4728 { 4729 struct clk_notifier_devres *devres = res; 4730 4731 clk_notifier_unregister(devres->clk, devres->nb); 4732 } 4733 4734 int devm_clk_notifier_register(struct device *dev, struct clk *clk, 4735 struct notifier_block *nb) 4736 { 4737 struct clk_notifier_devres *devres; 4738 int ret; 4739 4740 devres = devres_alloc(devm_clk_notifier_release, 4741 sizeof(*devres), GFP_KERNEL); 4742 4743 if (!devres) 4744 return -ENOMEM; 4745 4746 ret = clk_notifier_register(clk, nb); 4747 if (!ret) { 4748 devres->clk = clk; 4749 devres->nb = nb; 4750 devres_add(dev, devres); 4751 } else { 4752 devres_free(devres); 4753 } 4754 4755 return ret; 4756 } 4757 EXPORT_SYMBOL_GPL(devm_clk_notifier_register); 4758 4759 #ifdef CONFIG_OF 4760 static void clk_core_reparent_orphans(void) 4761 { 4762 clk_prepare_lock(); 4763 clk_core_reparent_orphans_nolock(); 4764 clk_prepare_unlock(); 4765 } 4766 4767 /** 4768 * struct of_clk_provider - Clock provider registration structure 4769 * @link: Entry in global list of clock providers 4770 * @node: Pointer to device tree node of clock provider 4771 * @get: Get clock callback. Returns NULL or a struct clk for the 4772 * given clock specifier 4773 * @get_hw: Get clk_hw callback. 
Returns NULL, ERR_PTR or a 4774 * struct clk_hw for the given clock specifier 4775 * @data: context pointer to be passed into @get callback 4776 */ 4777 struct of_clk_provider { 4778 struct list_head link; 4779 4780 struct device_node *node; 4781 struct clk *(*get)(struct of_phandle_args *clkspec, void *data); 4782 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data); 4783 void *data; 4784 }; 4785 4786 extern struct of_device_id __clk_of_table; 4787 static const struct of_device_id __clk_of_table_sentinel 4788 __used __section("__clk_of_table_end"); 4789 4790 static LIST_HEAD(of_clk_providers); 4791 static DEFINE_MUTEX(of_clk_mutex); 4792 4793 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 4794 void *data) 4795 { 4796 return data; 4797 } 4798 EXPORT_SYMBOL_GPL(of_clk_src_simple_get); 4799 4800 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) 4801 { 4802 return data; 4803 } 4804 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get); 4805 4806 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) 4807 { 4808 struct clk_onecell_data *clk_data = data; 4809 unsigned int idx = clkspec->args[0]; 4810 4811 if (idx >= clk_data->clk_num) { 4812 pr_err("%s: invalid clock index %u\n", __func__, idx); 4813 return ERR_PTR(-EINVAL); 4814 } 4815 4816 return clk_data->clks[idx]; 4817 } 4818 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); 4819 4820 struct clk_hw * 4821 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) 4822 { 4823 struct clk_hw_onecell_data *hw_data = data; 4824 unsigned int idx = clkspec->args[0]; 4825 4826 if (idx >= hw_data->num) { 4827 pr_err("%s: invalid index %u\n", __func__, idx); 4828 return ERR_PTR(-EINVAL); 4829 } 4830 4831 return hw_data->hws[idx]; 4832 } 4833 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); 4834 4835 /** 4836 * of_clk_add_provider() - Register a clock provider for a node 4837 * @np: Device node pointer associated with clock provider 4838 * @clk_src_get: callback for decoding clock 4839 * @data: context pointer for @clk_src_get callback. 4840 * 4841 * This function is *deprecated*. Use of_clk_add_hw_provider() instead. 4842 */ 4843 int of_clk_add_provider(struct device_node *np, 4844 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, 4845 void *data), 4846 void *data) 4847 { 4848 struct of_clk_provider *cp; 4849 int ret; 4850 4851 if (!np) 4852 return 0; 4853 4854 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 4855 if (!cp) 4856 return -ENOMEM; 4857 4858 cp->node = of_node_get(np); 4859 cp->data = data; 4860 cp->get = clk_src_get; 4861 4862 mutex_lock(&of_clk_mutex); 4863 list_add(&cp->link, &of_clk_providers); 4864 mutex_unlock(&of_clk_mutex); 4865 pr_debug("Added clock from %pOF\n", np); 4866 4867 clk_core_reparent_orphans(); 4868 4869 ret = of_clk_set_defaults(np, true); 4870 if (ret < 0) 4871 of_clk_del_provider(np); 4872 4873 fwnode_dev_initialized(&np->fwnode, true); 4874 4875 return ret; 4876 } 4877 EXPORT_SYMBOL_GPL(of_clk_add_provider); 4878 4879 /** 4880 * of_clk_add_hw_provider() - Register a clock provider for a node 4881 * @np: Device node pointer associated with clock provider 4882 * @get: callback for decoding clk_hw 4883 * @data: context pointer for @get callback. 
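 *
 * For illustration only (assuming hw_data is a populated
 * struct clk_hw_onecell_data), a provider might register itself with:
 *
 *	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);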
4884  */
4885 int of_clk_add_hw_provider(struct device_node *np,
4886 			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4887 						 void *data),
4888 			   void *data)
4889 {
4890 	struct of_clk_provider *cp;
4891 	int ret;
4892 
4893 	if (!np)
4894 		return 0;
4895 
4896 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4897 	if (!cp)
4898 		return -ENOMEM;
4899 
4900 	cp->node = of_node_get(np);
4901 	cp->data = data;
4902 	cp->get_hw = get;
4903 
4904 	mutex_lock(&of_clk_mutex);
4905 	list_add(&cp->link, &of_clk_providers);
4906 	mutex_unlock(&of_clk_mutex);
4907 	pr_debug("Added clk_hw provider from %pOF\n", np);
4908 
4909 	clk_core_reparent_orphans();
4910 
4911 	ret = of_clk_set_defaults(np, true);
4912 	if (ret < 0)
4913 		of_clk_del_provider(np);
4914 
4915 	fwnode_dev_initialized(&np->fwnode, true);
4916 
4917 	return ret;
4918 }
4919 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
4920 
4921 static void devm_of_clk_release_provider(struct device *dev, void *res)
4922 {
4923 	of_clk_del_provider(*(struct device_node **)res);
4924 }
4925 
4926 /*
4927  * We allow a child device to use its parent device as the clock provider node
4928  * for cases like MFD sub-devices where the child device driver wants to use
4929  * devm_*() APIs but not list the device in DT as a sub-node.
4930  */
4931 static struct device_node *get_clk_provider_node(struct device *dev)
4932 {
4933 	struct device_node *np, *parent_np;
4934 
4935 	np = dev->of_node;
4936 	parent_np = dev->parent ? dev->parent->of_node : NULL;
4937 
4938 	if (!of_property_present(np, "#clock-cells"))
4939 		if (of_property_present(parent_np, "#clock-cells"))
4940 			np = parent_np;
4941 
4942 	return np;
4943 }
4944 
4945 /**
4946  * devm_of_clk_add_hw_provider() - Managed clk provider node registration
4947  * @dev: Device acting as the clock provider (used for DT node and lifetime)
4948  * @get: callback for decoding clk_hw
4949  * @data: context pointer for @get callback
4950  *
4951  * Registers a clock provider for the given device's node. If the device has
4952  * no DT node, or if the device node lacks clock provider information
4953  * (#clock-cells), then the parent device's node is scanned instead. If the
4954  * parent node has #clock-cells, it is used for the registration. The provider
4955  * is automatically released at device exit.
4956  *
4957  * Return: 0 on success or an errno on failure.
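 *
 * A minimal sketch (hypothetical foo driver; clk_data assumed to be a
 * struct clk_hw_onecell_data embedded in the driver state):
 *
 *	return devm_of_clk_add_hw_provider(&pdev->dev,
 *					   of_clk_hw_onecell_get,
 *					   &foo->clk_data);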
4958 */ 4959 int devm_of_clk_add_hw_provider(struct device *dev, 4960 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 4961 void *data), 4962 void *data) 4963 { 4964 struct device_node **ptr, *np; 4965 int ret; 4966 4967 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), 4968 GFP_KERNEL); 4969 if (!ptr) 4970 return -ENOMEM; 4971 4972 np = get_clk_provider_node(dev); 4973 ret = of_clk_add_hw_provider(np, get, data); 4974 if (!ret) { 4975 *ptr = np; 4976 devres_add(dev, ptr); 4977 } else { 4978 devres_free(ptr); 4979 } 4980 4981 return ret; 4982 } 4983 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); 4984 4985 /** 4986 * of_clk_del_provider() - Remove a previously registered clock provider 4987 * @np: Device node pointer associated with clock provider 4988 */ 4989 void of_clk_del_provider(struct device_node *np) 4990 { 4991 struct of_clk_provider *cp; 4992 4993 if (!np) 4994 return; 4995 4996 mutex_lock(&of_clk_mutex); 4997 list_for_each_entry(cp, &of_clk_providers, link) { 4998 if (cp->node == np) { 4999 list_del(&cp->link); 5000 fwnode_dev_initialized(&np->fwnode, false); 5001 of_node_put(cp->node); 5002 kfree(cp); 5003 break; 5004 } 5005 } 5006 mutex_unlock(&of_clk_mutex); 5007 } 5008 EXPORT_SYMBOL_GPL(of_clk_del_provider); 5009 5010 /** 5011 * of_parse_clkspec() - Parse a DT clock specifier for a given device node 5012 * @np: device node to parse clock specifier from 5013 * @index: index of phandle to parse clock out of. If index < 0, @name is used 5014 * @name: clock name to find and parse. If name is NULL, the index is used 5015 * @out_args: Result of parsing the clock specifier 5016 * 5017 * Parses a device node's "clocks" and "clock-names" properties to find the 5018 * phandle and cells for the index or name that is desired. The resulting clock 5019 * specifier is placed into @out_args, or an errno is returned when there's a 5020 * parsing error. The @index argument is ignored if @name is non-NULL. 5021 * 5022 * Example: 5023 * 5024 * phandle1: clock-controller@1 { 5025 * #clock-cells = <2>; 5026 * } 5027 * 5028 * phandle2: clock-controller@2 { 5029 * #clock-cells = <1>; 5030 * } 5031 * 5032 * clock-consumer@3 { 5033 * clocks = <&phandle1 1 2 &phandle2 3>; 5034 * clock-names = "name1", "name2"; 5035 * } 5036 * 5037 * To get a device_node for `clock-controller@2' node you may call this 5038 * function a few different ways: 5039 * 5040 * of_parse_clkspec(clock-consumer@3, -1, "name2", &args); 5041 * of_parse_clkspec(clock-consumer@3, 1, NULL, &args); 5042 * of_parse_clkspec(clock-consumer@3, 1, "name2", &args); 5043 * 5044 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT 5045 * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in 5046 * the "clock-names" property of @np. 5047 */ 5048 static int of_parse_clkspec(const struct device_node *np, int index, 5049 const char *name, struct of_phandle_args *out_args) 5050 { 5051 int ret = -ENOENT; 5052 5053 /* Walk up the tree of devices looking for a clock property that matches */ 5054 while (np) { 5055 /* 5056 * For named clocks, first look up the name in the 5057 * "clock-names" property. If it cannot be found, then index 5058 * will be an error code and of_parse_phandle_with_args() will 5059 * return -EINVAL. 
5060 		 */
5061 		if (name)
5062 			index = of_property_match_string(np, "clock-names", name);
5063 		ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
5064 						 index, out_args);
5065 		if (!ret)
5066 			break;
5067 		if (name && index >= 0)
5068 			break;
5069 
5070 		/*
5071 		 * No matching clock found on this node. If the parent node
5072 		 * has a "clock-ranges" property, then we can try one of its
5073 		 * clocks.
5074 		 */
5075 		np = np->parent;
5076 		if (np && !of_get_property(np, "clock-ranges", NULL))
5077 			break;
5078 		index = 0;
5079 	}
5080 
5081 	return ret;
5082 }
5083 
5084 static struct clk_hw *
5085 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
5086 			      struct of_phandle_args *clkspec)
5087 {
5088 	struct clk *clk;
5089 
5090 	if (provider->get_hw)
5091 		return provider->get_hw(clkspec, provider->data);
5092 
5093 	clk = provider->get(clkspec, provider->data);
5094 	if (IS_ERR(clk))
5095 		return ERR_CAST(clk);
5096 	return __clk_get_hw(clk);
5097 }
5098 
5099 static struct clk_hw *
5100 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
5101 {
5102 	struct of_clk_provider *provider;
5103 	struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
5104 
5105 	if (!clkspec)
5106 		return ERR_PTR(-EINVAL);
5107 
5108 	mutex_lock(&of_clk_mutex);
5109 	list_for_each_entry(provider, &of_clk_providers, link) {
5110 		if (provider->node == clkspec->np) {
5111 			hw = __of_clk_get_hw_from_provider(provider, clkspec);
5112 			if (!IS_ERR(hw))
5113 				break;
5114 		}
5115 	}
5116 	mutex_unlock(&of_clk_mutex);
5117 
5118 	return hw;
5119 }
5120 
5121 /**
5122  * of_clk_get_from_provider() - Lookup a clock from a clock provider
5123  * @clkspec: pointer to a clock specifier data structure
5124  *
5125  * This function looks up a struct clk from the registered list of clock
5126  * providers. The input is a clock specifier data structure as returned
5127  * from the of_parse_phandle_with_args() function call.
5128  */
5129 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
5130 {
5131 	struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
5132 
5133 	return clk_hw_create_clk(NULL, hw, NULL, __func__);
5134 }
5135 EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
5136 
5137 struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
5138 			     const char *con_id)
5139 {
5140 	int ret;
5141 	struct clk_hw *hw;
5142 	struct of_phandle_args clkspec;
5143 
5144 	ret = of_parse_clkspec(np, index, con_id, &clkspec);
5145 	if (ret)
5146 		return ERR_PTR(ret);
5147 
5148 	hw = of_clk_get_hw_from_clkspec(&clkspec);
5149 	of_node_put(clkspec.np);
5150 
5151 	return hw;
5152 }
5153 
5154 static struct clk *__of_clk_get(struct device_node *np,
5155 				int index, const char *dev_id,
5156 				const char *con_id)
5157 {
5158 	struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
5159 
5160 	return clk_hw_create_clk(NULL, hw, dev_id, con_id);
5161 }
5162 
5163 struct clk *of_clk_get(struct device_node *np, int index)
5164 {
5165 	return __of_clk_get(np, index, np->full_name, NULL);
5166 }
5167 EXPORT_SYMBOL(of_clk_get);
5168 
5169 /**
5170  * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
5171  * @np: pointer to clock consumer node
5172  * @name: name of consumer's clock input, or NULL for the first clock reference
5173  *
5174  * This function parses the clocks and clock-names properties,
5175  * and uses them to look up the struct clk from the registered list of clock
5176  * providers.
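 *
 * For illustration only, given a consumer node with
 * clock-names = "baud", "register", the second input could be looked up
 * with (np being that consumer node):
 *
 *	clk = of_clk_get_by_name(np, "register");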
5177  */
5178 struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
5179 {
5180 	if (!np)
5181 		return ERR_PTR(-ENOENT);
5182 
5183 	return __of_clk_get(np, 0, np->full_name, name);
5184 }
5185 EXPORT_SYMBOL(of_clk_get_by_name);
5186 
5187 /**
5188  * of_clk_get_parent_count() - Count the number of clocks a device node has
5189  * @np: device node to count
5190  *
5191  * Returns: The number of clocks that are possible parents of this node
5192  */
5193 unsigned int of_clk_get_parent_count(const struct device_node *np)
5194 {
5195 	int count;
5196 
5197 	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
5198 	if (count < 0)
5199 		return 0;
5200 
5201 	return count;
5202 }
5203 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
5204 
5205 const char *of_clk_get_parent_name(const struct device_node *np, int index)
5206 {
5207 	struct of_phandle_args clkspec;
5208 	struct property *prop;
5209 	const char *clk_name;
5210 	const __be32 *vp;
5211 	u32 pv;
5212 	int rc;
5213 	int count;
5214 	struct clk *clk;
5215 
5216 	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
5217 					&clkspec);
5218 	if (rc)
5219 		return NULL;
5220 
5221 	index = clkspec.args_count ? clkspec.args[0] : 0;
5222 	count = 0;
5223 
5224 	/* if there is an indices property, use it to transfer the index
5225 	 * specified into an array offset for the clock-output-names property.
5226 	 */
5227 	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
5228 		if (index == pv) {
5229 			index = count;
5230 			break;
5231 		}
5232 		count++;
5233 	}
5234 	/* We went off the end of 'clock-indices' without finding it */
5235 	if (prop && !vp)
5236 		return NULL;
5237 
5238 	if (of_property_read_string_index(clkspec.np, "clock-output-names",
5239 					  index,
5240 					  &clk_name) < 0) {
5241 		/*
5242 		 * Best effort to get the name if the clock has been
5243 		 * registered with the framework. If the clock isn't
5244 		 * registered, we return the node name as the name of
5245 		 * the clock as long as #clock-cells = 0.
5246 		 */
5247 		clk = of_clk_get_from_provider(&clkspec);
5248 		if (IS_ERR(clk)) {
5249 			if (clkspec.args_count == 0)
5250 				clk_name = clkspec.np->name;
5251 			else
5252 				clk_name = NULL;
5253 		} else {
5254 			clk_name = __clk_get_name(clk);
5255 			clk_put(clk);
5256 		}
5257 	}
5258 
5259 
5260 	of_node_put(clkspec.np);
5261 	return clk_name;
5262 }
5263 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
5264 
5265 /**
5266  * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
5267  * number of parents
5268  * @np: Device node pointer associated with clock provider
5269  * @parents: pointer to char array that holds the parents' names
5270  * @size: size of the @parents array
5271  *
5272  * Return: number of parents for the clock node.
5273  */
5274 int of_clk_parent_fill(struct device_node *np, const char **parents,
5275 		       unsigned int size)
5276 {
5277 	unsigned int i = 0;
5278 
5279 	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
5280 		i++;
5281 
5282 	return i;
5283 }
5284 EXPORT_SYMBOL_GPL(of_clk_parent_fill);
5285 
5286 struct clock_provider {
5287 	void (*clk_init_cb)(struct device_node *);
5288 	struct device_node *np;
5289 	struct list_head node;
5290 };
5291 
5292 /*
5293  * This function looks for a parent clock. If there is one, then it
5294  * checks that the provider for this parent clock was initialized, in
5295  * which case the parent clock will be ready.
5296  */
5297 static int parent_ready(struct device_node *np)
5298 {
5299 	int i = 0;
5300 
5301 	while (true) {
5302 		struct clk *clk = of_clk_get(np, i);
5303 
5304 		/* this parent is ready, we can check the next one */
5305 		if (!IS_ERR(clk)) {
5306 			clk_put(clk);
5307 			i++;
5308 			continue;
5309 		}
5310 
5311 		/* at least one parent is not ready, we exit now */
5312 		if (PTR_ERR(clk) == -EPROBE_DEFER)
5313 			return 0;
5314 
5315 		/*
5316 		 * Here we assume that the device tree is written
5317 		 * correctly, so any other error means that there is
5318 		 * no more parent. As we didn't exit yet, the
5319 		 * previous parents are all ready. If there is no
5320 		 * clock parent at all, there is nothing to wait
5321 		 * for, so we consider their absence as being ready.
5322 		 */
5323 		return 1;
5324 	}
5325 }
5326 
5327 /**
5328  * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
5329  * @np: Device node pointer associated with clock provider
5330  * @index: clock index
5331  * @flags: pointer to top-level framework flags
5332  *
5333  * Detects if the clock-critical property exists and, if so, sets the
5334  * corresponding CLK_IS_CRITICAL flag.
5335  *
5336  * Do not use this function. It exists only for legacy Device Tree
5337  * bindings, such as the outdated one-clock-per-node style.
5338  * Those bindings typically put all clock data into .dts and the Linux
5339  * driver has no clock data, thus making it impossible to set this flag
5340  * correctly from the driver. Only those drivers may call
5341  * of_clk_detect_critical() from their setup functions.
5342  *
5343  * Return: error code or zero on success
5344  */
5345 int of_clk_detect_critical(struct device_node *np, int index,
5346 			   unsigned long *flags)
5347 {
5348 	struct property *prop;
5349 	const __be32 *cur;
5350 	uint32_t idx;
5351 
5352 	if (!np || !flags)
5353 		return -EINVAL;
5354 
5355 	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
5356 		if (index == idx)
5357 			*flags |= CLK_IS_CRITICAL;
5358 
5359 	return 0;
5360 }
5361 
5362 /**
5363  * of_clk_init() - Scan and init clock providers from the DT
5364  * @matches: array of compatible values and init functions for providers.
5365  *
5366  * This function scans the device tree for matching clock providers
5367  * and calls their initialization functions, trying to follow the
5368  * dependency order between providers as it goes.
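 *
 * Providers typically land in the default match table via
 * CLK_OF_DECLARE(); a hedged sketch with hypothetical names:
 *
 *	static void __init foo_clk_init(struct device_node *np)
 *	{
 *		// parse np, register clks, then add an of_clk provider
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_init);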
5369 */ 5370 void __init of_clk_init(const struct of_device_id *matches) 5371 { 5372 const struct of_device_id *match; 5373 struct device_node *np; 5374 struct clock_provider *clk_provider, *next; 5375 bool is_init_done; 5376 bool force = false; 5377 LIST_HEAD(clk_provider_list); 5378 5379 if (!matches) 5380 matches = &__clk_of_table; 5381 5382 /* First prepare the list of the clocks providers */ 5383 for_each_matching_node_and_match(np, matches, &match) { 5384 struct clock_provider *parent; 5385 5386 if (!of_device_is_available(np)) 5387 continue; 5388 5389 parent = kzalloc(sizeof(*parent), GFP_KERNEL); 5390 if (!parent) { 5391 list_for_each_entry_safe(clk_provider, next, 5392 &clk_provider_list, node) { 5393 list_del(&clk_provider->node); 5394 of_node_put(clk_provider->np); 5395 kfree(clk_provider); 5396 } 5397 of_node_put(np); 5398 return; 5399 } 5400 5401 parent->clk_init_cb = match->data; 5402 parent->np = of_node_get(np); 5403 list_add_tail(&parent->node, &clk_provider_list); 5404 } 5405 5406 while (!list_empty(&clk_provider_list)) { 5407 is_init_done = false; 5408 list_for_each_entry_safe(clk_provider, next, 5409 &clk_provider_list, node) { 5410 if (force || parent_ready(clk_provider->np)) { 5411 5412 /* Don't populate platform devices */ 5413 of_node_set_flag(clk_provider->np, 5414 OF_POPULATED); 5415 5416 clk_provider->clk_init_cb(clk_provider->np); 5417 of_clk_set_defaults(clk_provider->np, true); 5418 5419 list_del(&clk_provider->node); 5420 of_node_put(clk_provider->np); 5421 kfree(clk_provider); 5422 is_init_done = true; 5423 } 5424 } 5425 5426 /* 5427 * We didn't manage to initialize any of the 5428 * remaining providers during the last loop, so now we 5429 * initialize all the remaining ones unconditionally 5430 * in case the clock parent was not mandatory 5431 */ 5432 if (!is_init_done) 5433 force = true; 5434 } 5435 } 5436 #endif 5437