1 /* 2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> 3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 * 9 * Standard functionality for the common clock API. See Documentation/clk.txt 10 */ 11 12 #include <linux/clk.h> 13 #include <linux/clk-provider.h> 14 #include <linux/clk/clk-conf.h> 15 #include <linux/module.h> 16 #include <linux/mutex.h> 17 #include <linux/spinlock.h> 18 #include <linux/err.h> 19 #include <linux/list.h> 20 #include <linux/slab.h> 21 #include <linux/of.h> 22 #include <linux/device.h> 23 #include <linux/init.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/sched.h> 26 #include <linux/clkdev.h> 27 28 #include "clk.h" 29 30 static DEFINE_SPINLOCK(enable_lock); 31 static DEFINE_MUTEX(prepare_lock); 32 33 static struct task_struct *prepare_owner; 34 static struct task_struct *enable_owner; 35 36 static int prepare_refcnt; 37 static int enable_refcnt; 38 39 static HLIST_HEAD(clk_root_list); 40 static HLIST_HEAD(clk_orphan_list); 41 static LIST_HEAD(clk_notifier_list); 42 43 /*** private data structures ***/ 44 45 struct clk_core { 46 const char *name; 47 const struct clk_ops *ops; 48 struct clk_hw *hw; 49 struct module *owner; 50 struct device *dev; 51 struct clk_core *parent; 52 const char **parent_names; 53 struct clk_core **parents; 54 u8 num_parents; 55 u8 new_parent_index; 56 unsigned long rate; 57 unsigned long req_rate; 58 unsigned long new_rate; 59 struct clk_core *new_parent; 60 struct clk_core *new_child; 61 unsigned long flags; 62 bool orphan; 63 unsigned int enable_count; 64 unsigned int prepare_count; 65 unsigned long min_rate; 66 unsigned long max_rate; 67 unsigned long accuracy; 68 int phase; 69 struct hlist_head children; 70 struct hlist_node child_node; 71 struct hlist_head clks; 72 unsigned int notifier_count; 73 #ifdef CONFIG_DEBUG_FS 74 struct dentry *dentry; 75 struct hlist_node debug_node; 76 #endif 77 struct kref ref; 78 }; 79 80 #define CREATE_TRACE_POINTS 81 #include <trace/events/clk.h> 82 83 struct clk { 84 struct clk_core *core; 85 const char *dev_id; 86 const char *con_id; 87 unsigned long min_rate; 88 unsigned long max_rate; 89 struct hlist_node clks_node; 90 }; 91 92 /*** runtime pm ***/ 93 static int clk_pm_runtime_get(struct clk_core *core) 94 { 95 int ret = 0; 96 97 if (!core->dev) 98 return 0; 99 100 ret = pm_runtime_get_sync(core->dev); 101 return ret < 0 ? 
ret : 0; 102 } 103 104 static void clk_pm_runtime_put(struct clk_core *core) 105 { 106 if (!core->dev) 107 return; 108 109 pm_runtime_put_sync(core->dev); 110 } 111 112 /*** locking ***/ 113 static void clk_prepare_lock(void) 114 { 115 if (!mutex_trylock(&prepare_lock)) { 116 if (prepare_owner == current) { 117 prepare_refcnt++; 118 return; 119 } 120 mutex_lock(&prepare_lock); 121 } 122 WARN_ON_ONCE(prepare_owner != NULL); 123 WARN_ON_ONCE(prepare_refcnt != 0); 124 prepare_owner = current; 125 prepare_refcnt = 1; 126 } 127 128 static void clk_prepare_unlock(void) 129 { 130 WARN_ON_ONCE(prepare_owner != current); 131 WARN_ON_ONCE(prepare_refcnt == 0); 132 133 if (--prepare_refcnt) 134 return; 135 prepare_owner = NULL; 136 mutex_unlock(&prepare_lock); 137 } 138 139 static unsigned long clk_enable_lock(void) 140 __acquires(enable_lock) 141 { 142 unsigned long flags; 143 144 if (!spin_trylock_irqsave(&enable_lock, flags)) { 145 if (enable_owner == current) { 146 enable_refcnt++; 147 __acquire(enable_lock); 148 return flags; 149 } 150 spin_lock_irqsave(&enable_lock, flags); 151 } 152 WARN_ON_ONCE(enable_owner != NULL); 153 WARN_ON_ONCE(enable_refcnt != 0); 154 enable_owner = current; 155 enable_refcnt = 1; 156 return flags; 157 } 158 159 static void clk_enable_unlock(unsigned long flags) 160 __releases(enable_lock) 161 { 162 WARN_ON_ONCE(enable_owner != current); 163 WARN_ON_ONCE(enable_refcnt == 0); 164 165 if (--enable_refcnt) { 166 __release(enable_lock); 167 return; 168 } 169 enable_owner = NULL; 170 spin_unlock_irqrestore(&enable_lock, flags); 171 } 172 173 static bool clk_core_is_prepared(struct clk_core *core) 174 { 175 bool ret = false; 176 177 /* 178 * .is_prepared is optional for clocks that can prepare 179 * fall back to software usage counter if it is missing 180 */ 181 if (!core->ops->is_prepared) 182 return core->prepare_count; 183 184 if (!clk_pm_runtime_get(core)) { 185 ret = core->ops->is_prepared(core->hw); 186 clk_pm_runtime_put(core); 187 } 188 189 return ret; 190 } 191 192 static bool clk_core_is_enabled(struct clk_core *core) 193 { 194 bool ret = false; 195 196 /* 197 * .is_enabled is only mandatory for clocks that gate 198 * fall back to software usage counter if .is_enabled is missing 199 */ 200 if (!core->ops->is_enabled) 201 return core->enable_count; 202 203 /* 204 * Check if clock controller's device is runtime active before 205 * calling .is_enabled callback. If not, assume that clock is 206 * disabled, because we might be called from atomic context, from 207 * which pm_runtime_get() is not allowed. 208 * This function is called mainly from clk_disable_unused_subtree, 209 * which ensures proper runtime pm activation of controller before 210 * taking enable spinlock, but the below check is needed if one tries 211 * to call it from other places. 212 */ 213 if (core->dev) { 214 pm_runtime_get_noresume(core->dev); 215 if (!pm_runtime_active(core->dev)) { 216 ret = false; 217 goto done; 218 } 219 } 220 221 ret = core->ops->is_enabled(core->hw); 222 done: 223 clk_pm_runtime_put(core); 224 225 return ret; 226 } 227 228 /*** helper functions ***/ 229 230 const char *__clk_get_name(const struct clk *clk) 231 { 232 return !clk ? NULL : clk->core->name; 233 } 234 EXPORT_SYMBOL_GPL(__clk_get_name); 235 236 const char *clk_hw_get_name(const struct clk_hw *hw) 237 { 238 return hw->core->name; 239 } 240 EXPORT_SYMBOL_GPL(clk_hw_get_name); 241 242 struct clk_hw *__clk_get_hw(struct clk *clk) 243 { 244 return !clk ? 
NULL : clk->core->hw; 245 } 246 EXPORT_SYMBOL_GPL(__clk_get_hw); 247 248 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw) 249 { 250 return hw->core->num_parents; 251 } 252 EXPORT_SYMBOL_GPL(clk_hw_get_num_parents); 253 254 struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw) 255 { 256 return hw->core->parent ? hw->core->parent->hw : NULL; 257 } 258 EXPORT_SYMBOL_GPL(clk_hw_get_parent); 259 260 static struct clk_core *__clk_lookup_subtree(const char *name, 261 struct clk_core *core) 262 { 263 struct clk_core *child; 264 struct clk_core *ret; 265 266 if (!strcmp(core->name, name)) 267 return core; 268 269 hlist_for_each_entry(child, &core->children, child_node) { 270 ret = __clk_lookup_subtree(name, child); 271 if (ret) 272 return ret; 273 } 274 275 return NULL; 276 } 277 278 static struct clk_core *clk_core_lookup(const char *name) 279 { 280 struct clk_core *root_clk; 281 struct clk_core *ret; 282 283 if (!name) 284 return NULL; 285 286 /* search the 'proper' clk tree first */ 287 hlist_for_each_entry(root_clk, &clk_root_list, child_node) { 288 ret = __clk_lookup_subtree(name, root_clk); 289 if (ret) 290 return ret; 291 } 292 293 /* if not found, then search the orphan tree */ 294 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { 295 ret = __clk_lookup_subtree(name, root_clk); 296 if (ret) 297 return ret; 298 } 299 300 return NULL; 301 } 302 303 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, 304 u8 index) 305 { 306 if (!core || index >= core->num_parents) 307 return NULL; 308 309 if (!core->parents[index]) 310 core->parents[index] = 311 clk_core_lookup(core->parent_names[index]); 312 313 return core->parents[index]; 314 } 315 316 struct clk_hw * 317 clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index) 318 { 319 struct clk_core *parent; 320 321 parent = clk_core_get_parent_by_index(hw->core, index); 322 323 return !parent ? NULL : parent->hw; 324 } 325 EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index); 326 327 unsigned int __clk_get_enable_count(struct clk *clk) 328 { 329 return !clk ? 0 : clk->core->enable_count; 330 } 331 332 static unsigned long clk_core_get_rate_nolock(struct clk_core *core) 333 { 334 unsigned long ret; 335 336 if (!core) { 337 ret = 0; 338 goto out; 339 } 340 341 ret = core->rate; 342 343 if (!core->num_parents) 344 goto out; 345 346 if (!core->parent) 347 ret = 0; 348 349 out: 350 return ret; 351 } 352 353 unsigned long clk_hw_get_rate(const struct clk_hw *hw) 354 { 355 return clk_core_get_rate_nolock(hw->core); 356 } 357 EXPORT_SYMBOL_GPL(clk_hw_get_rate); 358 359 static unsigned long __clk_get_accuracy(struct clk_core *core) 360 { 361 if (!core) 362 return 0; 363 364 return core->accuracy; 365 } 366 367 unsigned long __clk_get_flags(struct clk *clk) 368 { 369 return !clk ? 
0 : clk->core->flags; 370 } 371 EXPORT_SYMBOL_GPL(__clk_get_flags); 372 373 unsigned long clk_hw_get_flags(const struct clk_hw *hw) 374 { 375 return hw->core->flags; 376 } 377 EXPORT_SYMBOL_GPL(clk_hw_get_flags); 378 379 bool clk_hw_is_prepared(const struct clk_hw *hw) 380 { 381 return clk_core_is_prepared(hw->core); 382 } 383 384 bool clk_hw_is_enabled(const struct clk_hw *hw) 385 { 386 return clk_core_is_enabled(hw->core); 387 } 388 389 bool __clk_is_enabled(struct clk *clk) 390 { 391 if (!clk) 392 return false; 393 394 return clk_core_is_enabled(clk->core); 395 } 396 EXPORT_SYMBOL_GPL(__clk_is_enabled); 397 398 static bool mux_is_better_rate(unsigned long rate, unsigned long now, 399 unsigned long best, unsigned long flags) 400 { 401 if (flags & CLK_MUX_ROUND_CLOSEST) 402 return abs(now - rate) < abs(best - rate); 403 404 return now <= rate && now > best; 405 } 406 407 static int 408 clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req, 409 unsigned long flags) 410 { 411 struct clk_core *core = hw->core, *parent, *best_parent = NULL; 412 int i, num_parents, ret; 413 unsigned long best = 0; 414 struct clk_rate_request parent_req = *req; 415 416 /* if NO_REPARENT flag set, pass through to current parent */ 417 if (core->flags & CLK_SET_RATE_NO_REPARENT) { 418 parent = core->parent; 419 if (core->flags & CLK_SET_RATE_PARENT) { 420 ret = __clk_determine_rate(parent ? parent->hw : NULL, 421 &parent_req); 422 if (ret) 423 return ret; 424 425 best = parent_req.rate; 426 } else if (parent) { 427 best = clk_core_get_rate_nolock(parent); 428 } else { 429 best = clk_core_get_rate_nolock(core); 430 } 431 432 goto out; 433 } 434 435 /* find the parent that can provide the fastest rate <= rate */ 436 num_parents = core->num_parents; 437 for (i = 0; i < num_parents; i++) { 438 parent = clk_core_get_parent_by_index(core, i); 439 if (!parent) 440 continue; 441 442 if (core->flags & CLK_SET_RATE_PARENT) { 443 parent_req = *req; 444 ret = __clk_determine_rate(parent->hw, &parent_req); 445 if (ret) 446 continue; 447 } else { 448 parent_req.rate = clk_core_get_rate_nolock(parent); 449 } 450 451 if (mux_is_better_rate(req->rate, parent_req.rate, 452 best, flags)) { 453 best_parent = parent; 454 best = parent_req.rate; 455 } 456 } 457 458 if (!best_parent) 459 return -EINVAL; 460 461 out: 462 if (best_parent) 463 req->best_parent_hw = best_parent->hw; 464 req->best_parent_rate = best; 465 req->rate = best; 466 467 return 0; 468 } 469 470 struct clk *__clk_lookup(const char *name) 471 { 472 struct clk_core *core = clk_core_lookup(name); 473 474 return !core ? NULL : core->hw->clk; 475 } 476 477 static void clk_core_get_boundaries(struct clk_core *core, 478 unsigned long *min_rate, 479 unsigned long *max_rate) 480 { 481 struct clk *clk_user; 482 483 *min_rate = core->min_rate; 484 *max_rate = core->max_rate; 485 486 hlist_for_each_entry(clk_user, &core->clks, clks_node) 487 *min_rate = max(*min_rate, clk_user->min_rate); 488 489 hlist_for_each_entry(clk_user, &core->clks, clks_node) 490 *max_rate = min(*max_rate, clk_user->max_rate); 491 } 492 493 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, 494 unsigned long max_rate) 495 { 496 hw->core->min_rate = min_rate; 497 hw->core->max_rate = max_rate; 498 } 499 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range); 500 501 /* 502 * Helper for finding best parent to provide a given frequency. This can be used 503 * directly as a determine_rate callback (e.g. 
for a mux), or from a more 504 * complex clock that may combine a mux with other operations. 505 */ 506 int __clk_mux_determine_rate(struct clk_hw *hw, 507 struct clk_rate_request *req) 508 { 509 return clk_mux_determine_rate_flags(hw, req, 0); 510 } 511 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); 512 513 int __clk_mux_determine_rate_closest(struct clk_hw *hw, 514 struct clk_rate_request *req) 515 { 516 return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST); 517 } 518 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); 519 520 /*** clk api ***/ 521 522 static void clk_core_unprepare(struct clk_core *core) 523 { 524 lockdep_assert_held(&prepare_lock); 525 526 if (!core) 527 return; 528 529 if (WARN_ON(core->prepare_count == 0)) 530 return; 531 532 if (WARN_ON(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL)) 533 return; 534 535 if (--core->prepare_count > 0) 536 return; 537 538 WARN_ON(core->enable_count > 0); 539 540 trace_clk_unprepare(core); 541 542 if (core->ops->unprepare) 543 core->ops->unprepare(core->hw); 544 545 clk_pm_runtime_put(core); 546 547 trace_clk_unprepare_complete(core); 548 clk_core_unprepare(core->parent); 549 } 550 551 static void clk_core_unprepare_lock(struct clk_core *core) 552 { 553 clk_prepare_lock(); 554 clk_core_unprepare(core); 555 clk_prepare_unlock(); 556 } 557 558 /** 559 * clk_unprepare - undo preparation of a clock source 560 * @clk: the clk being unprepared 561 * 562 * clk_unprepare may sleep, which differentiates it from clk_disable. In a 563 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk 564 * if the operation may sleep. One example is a clk which is accessed over 565 * I2c. In the complex case a clk gate operation may require a fast and a slow 566 * part. It is this reason that clk_unprepare and clk_disable are not mutually 567 * exclusive. In fact clk_disable must be called before clk_unprepare. 568 */ 569 void clk_unprepare(struct clk *clk) 570 { 571 if (IS_ERR_OR_NULL(clk)) 572 return; 573 574 clk_core_unprepare_lock(clk->core); 575 } 576 EXPORT_SYMBOL_GPL(clk_unprepare); 577 578 static int clk_core_prepare(struct clk_core *core) 579 { 580 int ret = 0; 581 582 lockdep_assert_held(&prepare_lock); 583 584 if (!core) 585 return 0; 586 587 if (core->prepare_count == 0) { 588 ret = clk_pm_runtime_get(core); 589 if (ret) 590 return ret; 591 592 ret = clk_core_prepare(core->parent); 593 if (ret) 594 goto runtime_put; 595 596 trace_clk_prepare(core); 597 598 if (core->ops->prepare) 599 ret = core->ops->prepare(core->hw); 600 601 trace_clk_prepare_complete(core); 602 603 if (ret) 604 goto unprepare; 605 } 606 607 core->prepare_count++; 608 609 return 0; 610 unprepare: 611 clk_core_unprepare(core->parent); 612 runtime_put: 613 clk_pm_runtime_put(core); 614 return ret; 615 } 616 617 static int clk_core_prepare_lock(struct clk_core *core) 618 { 619 int ret; 620 621 clk_prepare_lock(); 622 ret = clk_core_prepare(core); 623 clk_prepare_unlock(); 624 625 return ret; 626 } 627 628 /** 629 * clk_prepare - prepare a clock source 630 * @clk: the clk being prepared 631 * 632 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple 633 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the 634 * operation may sleep. One example is a clk which is accessed over I2c. In 635 * the complex case a clk ungate operation may require a fast and a slow part. 636 * It is this reason that clk_prepare and clk_enable are not mutually 637 * exclusive. 
In fact clk_prepare must be called before clk_enable. 638 * Returns 0 on success, -EERROR otherwise. 639 */ 640 int clk_prepare(struct clk *clk) 641 { 642 if (!clk) 643 return 0; 644 645 return clk_core_prepare_lock(clk->core); 646 } 647 EXPORT_SYMBOL_GPL(clk_prepare); 648 649 static void clk_core_disable(struct clk_core *core) 650 { 651 lockdep_assert_held(&enable_lock); 652 653 if (!core) 654 return; 655 656 if (WARN_ON(core->enable_count == 0)) 657 return; 658 659 if (WARN_ON(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL)) 660 return; 661 662 if (--core->enable_count > 0) 663 return; 664 665 trace_clk_disable_rcuidle(core); 666 667 if (core->ops->disable) 668 core->ops->disable(core->hw); 669 670 trace_clk_disable_complete_rcuidle(core); 671 672 clk_core_disable(core->parent); 673 } 674 675 static void clk_core_disable_lock(struct clk_core *core) 676 { 677 unsigned long flags; 678 679 flags = clk_enable_lock(); 680 clk_core_disable(core); 681 clk_enable_unlock(flags); 682 } 683 684 /** 685 * clk_disable - gate a clock 686 * @clk: the clk being gated 687 * 688 * clk_disable must not sleep, which differentiates it from clk_unprepare. In 689 * a simple case, clk_disable can be used instead of clk_unprepare to gate a 690 * clk if the operation is fast and will never sleep. One example is a 691 * SoC-internal clk which is controlled via simple register writes. In the 692 * complex case a clk gate operation may require a fast and a slow part. It is 693 * this reason that clk_unprepare and clk_disable are not mutually exclusive. 694 * In fact clk_disable must be called before clk_unprepare. 695 */ 696 void clk_disable(struct clk *clk) 697 { 698 if (IS_ERR_OR_NULL(clk)) 699 return; 700 701 clk_core_disable_lock(clk->core); 702 } 703 EXPORT_SYMBOL_GPL(clk_disable); 704 705 static int clk_core_enable(struct clk_core *core) 706 { 707 int ret = 0; 708 709 lockdep_assert_held(&enable_lock); 710 711 if (!core) 712 return 0; 713 714 if (WARN_ON(core->prepare_count == 0)) 715 return -ESHUTDOWN; 716 717 if (core->enable_count == 0) { 718 ret = clk_core_enable(core->parent); 719 720 if (ret) 721 return ret; 722 723 trace_clk_enable_rcuidle(core); 724 725 if (core->ops->enable) 726 ret = core->ops->enable(core->hw); 727 728 trace_clk_enable_complete_rcuidle(core); 729 730 if (ret) { 731 clk_core_disable(core->parent); 732 return ret; 733 } 734 } 735 736 core->enable_count++; 737 return 0; 738 } 739 740 static int clk_core_enable_lock(struct clk_core *core) 741 { 742 unsigned long flags; 743 int ret; 744 745 flags = clk_enable_lock(); 746 ret = clk_core_enable(core); 747 clk_enable_unlock(flags); 748 749 return ret; 750 } 751 752 /** 753 * clk_enable - ungate a clock 754 * @clk: the clk being ungated 755 * 756 * clk_enable must not sleep, which differentiates it from clk_prepare. In a 757 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk 758 * if the operation will never sleep. One example is a SoC-internal clk which 759 * is controlled via simple register writes. In the complex case a clk ungate 760 * operation may require a fast and a slow part. It is this reason that 761 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare 762 * must be called before clk_enable. Returns 0 on success, -EERROR 763 * otherwise. 
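 *
 * A minimal consumer-side sketch (hypothetical clk handle obtained earlier
 * via clk_get()/devm_clk_get(), error handling trimmed):
 *
 *	ret = clk_prepare(clk);		(may sleep, non-atomic context)
 *	...
 *	ret = clk_enable(clk);		(fast, atomic-safe)
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 *
 * Consumers that can do both halves from sleepable context may use the
 * clk_prepare_enable()/clk_disable_unprepare() helpers instead.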
764 */ 765 int clk_enable(struct clk *clk) 766 { 767 if (!clk) 768 return 0; 769 770 return clk_core_enable_lock(clk->core); 771 } 772 EXPORT_SYMBOL_GPL(clk_enable); 773 774 static int clk_core_prepare_enable(struct clk_core *core) 775 { 776 int ret; 777 778 ret = clk_core_prepare_lock(core); 779 if (ret) 780 return ret; 781 782 ret = clk_core_enable_lock(core); 783 if (ret) 784 clk_core_unprepare_lock(core); 785 786 return ret; 787 } 788 789 static void clk_core_disable_unprepare(struct clk_core *core) 790 { 791 clk_core_disable_lock(core); 792 clk_core_unprepare_lock(core); 793 } 794 795 static void clk_unprepare_unused_subtree(struct clk_core *core) 796 { 797 struct clk_core *child; 798 799 lockdep_assert_held(&prepare_lock); 800 801 hlist_for_each_entry(child, &core->children, child_node) 802 clk_unprepare_unused_subtree(child); 803 804 if (core->prepare_count) 805 return; 806 807 if (core->flags & CLK_IGNORE_UNUSED) 808 return; 809 810 if (clk_pm_runtime_get(core)) 811 return; 812 813 if (clk_core_is_prepared(core)) { 814 trace_clk_unprepare(core); 815 if (core->ops->unprepare_unused) 816 core->ops->unprepare_unused(core->hw); 817 else if (core->ops->unprepare) 818 core->ops->unprepare(core->hw); 819 trace_clk_unprepare_complete(core); 820 } 821 822 clk_pm_runtime_put(core); 823 } 824 825 static void clk_disable_unused_subtree(struct clk_core *core) 826 { 827 struct clk_core *child; 828 unsigned long flags; 829 830 lockdep_assert_held(&prepare_lock); 831 832 hlist_for_each_entry(child, &core->children, child_node) 833 clk_disable_unused_subtree(child); 834 835 if (core->flags & CLK_OPS_PARENT_ENABLE) 836 clk_core_prepare_enable(core->parent); 837 838 if (clk_pm_runtime_get(core)) 839 goto unprepare_out; 840 841 flags = clk_enable_lock(); 842 843 if (core->enable_count) 844 goto unlock_out; 845 846 if (core->flags & CLK_IGNORE_UNUSED) 847 goto unlock_out; 848 849 /* 850 * some gate clocks have special needs during the disable-unused 851 * sequence. 
call .disable_unused if available, otherwise fall 852 * back to .disable 853 */ 854 if (clk_core_is_enabled(core)) { 855 trace_clk_disable(core); 856 if (core->ops->disable_unused) 857 core->ops->disable_unused(core->hw); 858 else if (core->ops->disable) 859 core->ops->disable(core->hw); 860 trace_clk_disable_complete(core); 861 } 862 863 unlock_out: 864 clk_enable_unlock(flags); 865 clk_pm_runtime_put(core); 866 unprepare_out: 867 if (core->flags & CLK_OPS_PARENT_ENABLE) 868 clk_core_disable_unprepare(core->parent); 869 } 870 871 static bool clk_ignore_unused; 872 static int __init clk_ignore_unused_setup(char *__unused) 873 { 874 clk_ignore_unused = true; 875 return 1; 876 } 877 __setup("clk_ignore_unused", clk_ignore_unused_setup); 878 879 static int clk_disable_unused(void) 880 { 881 struct clk_core *core; 882 883 if (clk_ignore_unused) { 884 pr_warn("clk: Not disabling unused clocks\n"); 885 return 0; 886 } 887 888 clk_prepare_lock(); 889 890 hlist_for_each_entry(core, &clk_root_list, child_node) 891 clk_disable_unused_subtree(core); 892 893 hlist_for_each_entry(core, &clk_orphan_list, child_node) 894 clk_disable_unused_subtree(core); 895 896 hlist_for_each_entry(core, &clk_root_list, child_node) 897 clk_unprepare_unused_subtree(core); 898 899 hlist_for_each_entry(core, &clk_orphan_list, child_node) 900 clk_unprepare_unused_subtree(core); 901 902 clk_prepare_unlock(); 903 904 return 0; 905 } 906 late_initcall_sync(clk_disable_unused); 907 908 static int clk_core_round_rate_nolock(struct clk_core *core, 909 struct clk_rate_request *req) 910 { 911 struct clk_core *parent; 912 long rate; 913 914 lockdep_assert_held(&prepare_lock); 915 916 if (!core) 917 return 0; 918 919 parent = core->parent; 920 if (parent) { 921 req->best_parent_hw = parent->hw; 922 req->best_parent_rate = parent->rate; 923 } else { 924 req->best_parent_hw = NULL; 925 req->best_parent_rate = 0; 926 } 927 928 if (core->ops->determine_rate) { 929 return core->ops->determine_rate(core->hw, req); 930 } else if (core->ops->round_rate) { 931 rate = core->ops->round_rate(core->hw, req->rate, 932 &req->best_parent_rate); 933 if (rate < 0) 934 return rate; 935 936 req->rate = rate; 937 } else if (core->flags & CLK_SET_RATE_PARENT) { 938 return clk_core_round_rate_nolock(parent, req); 939 } else { 940 req->rate = core->rate; 941 } 942 943 return 0; 944 } 945 946 /** 947 * __clk_determine_rate - get the closest rate actually supported by a clock 948 * @hw: determine the rate of this clock 949 * @req: target rate request 950 * 951 * Useful for clk_ops such as .set_rate and .determine_rate. 952 */ 953 int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 954 { 955 if (!hw) { 956 req->rate = 0; 957 return 0; 958 } 959 960 return clk_core_round_rate_nolock(hw->core, req); 961 } 962 EXPORT_SYMBOL_GPL(__clk_determine_rate); 963 964 unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate) 965 { 966 int ret; 967 struct clk_rate_request req; 968 969 clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate); 970 req.rate = rate; 971 972 ret = clk_core_round_rate_nolock(hw->core, &req); 973 if (ret) 974 return 0; 975 976 return req.rate; 977 } 978 EXPORT_SYMBOL_GPL(clk_hw_round_rate); 979 980 /** 981 * clk_round_rate - round the given rate for a clk 982 * @clk: the clk for which we are rounding a rate 983 * @rate: the rate which is to be rounded 984 * 985 * Takes in a rate as input and rounds it to a rate that the clk can actually 986 * use which is then returned. 
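 *
 * For example (illustrative only; the achievable rate depends entirely on
 * the underlying hardware):
 *
 *	rounded = clk_round_rate(clk, 48000000);
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 *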
If clk doesn't support round_rate operation 987 * then the parent rate is returned. 988 */ 989 long clk_round_rate(struct clk *clk, unsigned long rate) 990 { 991 struct clk_rate_request req; 992 int ret; 993 994 if (!clk) 995 return 0; 996 997 clk_prepare_lock(); 998 999 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); 1000 req.rate = rate; 1001 1002 ret = clk_core_round_rate_nolock(clk->core, &req); 1003 clk_prepare_unlock(); 1004 1005 if (ret) 1006 return ret; 1007 1008 return req.rate; 1009 } 1010 EXPORT_SYMBOL_GPL(clk_round_rate); 1011 1012 /** 1013 * __clk_notify - call clk notifier chain 1014 * @core: clk that is changing rate 1015 * @msg: clk notifier type (see include/linux/clk.h) 1016 * @old_rate: old clk rate 1017 * @new_rate: new clk rate 1018 * 1019 * Triggers a notifier call chain on the clk rate-change notification 1020 * for 'clk'. Passes a pointer to the struct clk and the previous 1021 * and current rates to the notifier callback. Intended to be called by 1022 * internal clock code only. Returns NOTIFY_DONE from the last driver 1023 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if 1024 * a driver returns that. 1025 */ 1026 static int __clk_notify(struct clk_core *core, unsigned long msg, 1027 unsigned long old_rate, unsigned long new_rate) 1028 { 1029 struct clk_notifier *cn; 1030 struct clk_notifier_data cnd; 1031 int ret = NOTIFY_DONE; 1032 1033 cnd.old_rate = old_rate; 1034 cnd.new_rate = new_rate; 1035 1036 list_for_each_entry(cn, &clk_notifier_list, node) { 1037 if (cn->clk->core == core) { 1038 cnd.clk = cn->clk; 1039 ret = srcu_notifier_call_chain(&cn->notifier_head, msg, 1040 &cnd); 1041 if (ret & NOTIFY_STOP_MASK) 1042 return ret; 1043 } 1044 } 1045 1046 return ret; 1047 } 1048 1049 /** 1050 * __clk_recalc_accuracies 1051 * @core: first clk in the subtree 1052 * 1053 * Walks the subtree of clks starting with clk and recalculates accuracies as 1054 * it goes. Note that if a clk does not implement the .recalc_accuracy 1055 * callback then it is assumed that the clock will take on the accuracy of its 1056 * parent. 1057 */ 1058 static void __clk_recalc_accuracies(struct clk_core *core) 1059 { 1060 unsigned long parent_accuracy = 0; 1061 struct clk_core *child; 1062 1063 lockdep_assert_held(&prepare_lock); 1064 1065 if (core->parent) 1066 parent_accuracy = core->parent->accuracy; 1067 1068 if (core->ops->recalc_accuracy) 1069 core->accuracy = core->ops->recalc_accuracy(core->hw, 1070 parent_accuracy); 1071 else 1072 core->accuracy = parent_accuracy; 1073 1074 hlist_for_each_entry(child, &core->children, child_node) 1075 __clk_recalc_accuracies(child); 1076 } 1077 1078 static long clk_core_get_accuracy(struct clk_core *core) 1079 { 1080 unsigned long accuracy; 1081 1082 clk_prepare_lock(); 1083 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) 1084 __clk_recalc_accuracies(core); 1085 1086 accuracy = __clk_get_accuracy(core); 1087 clk_prepare_unlock(); 1088 1089 return accuracy; 1090 } 1091 1092 /** 1093 * clk_get_accuracy - return the accuracy of clk 1094 * @clk: the clk whose accuracy is being returned 1095 * 1096 * Simply returns the cached accuracy of the clk, unless 1097 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be 1098 * issued. 1099 * If clk is NULL then returns 0. 
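 *
 * The accuracy is expressed in parts per billion (ppb), so a consumer can
 * bound the worst-case frequency error, e.g. (sketch using the 64-bit math
 * helpers from linux/math64.h):
 *
 *	rate = clk_get_rate(clk);
 *	ppb = clk_get_accuracy(clk);
 *	max_err_hz = div64_u64((u64)rate * ppb, 1000000000ULL);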
1100 */ 1101 long clk_get_accuracy(struct clk *clk) 1102 { 1103 if (!clk) 1104 return 0; 1105 1106 return clk_core_get_accuracy(clk->core); 1107 } 1108 EXPORT_SYMBOL_GPL(clk_get_accuracy); 1109 1110 static unsigned long clk_recalc(struct clk_core *core, 1111 unsigned long parent_rate) 1112 { 1113 unsigned long rate = parent_rate; 1114 1115 if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) { 1116 rate = core->ops->recalc_rate(core->hw, parent_rate); 1117 clk_pm_runtime_put(core); 1118 } 1119 return rate; 1120 } 1121 1122 /** 1123 * __clk_recalc_rates 1124 * @core: first clk in the subtree 1125 * @msg: notification type (see include/linux/clk.h) 1126 * 1127 * Walks the subtree of clks starting with clk and recalculates rates as it 1128 * goes. Note that if a clk does not implement the .recalc_rate callback then 1129 * it is assumed that the clock will take on the rate of its parent. 1130 * 1131 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, 1132 * if necessary. 1133 */ 1134 static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) 1135 { 1136 unsigned long old_rate; 1137 unsigned long parent_rate = 0; 1138 struct clk_core *child; 1139 1140 lockdep_assert_held(&prepare_lock); 1141 1142 old_rate = core->rate; 1143 1144 if (core->parent) 1145 parent_rate = core->parent->rate; 1146 1147 core->rate = clk_recalc(core, parent_rate); 1148 1149 /* 1150 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE 1151 * & ABORT_RATE_CHANGE notifiers 1152 */ 1153 if (core->notifier_count && msg) 1154 __clk_notify(core, msg, old_rate, core->rate); 1155 1156 hlist_for_each_entry(child, &core->children, child_node) 1157 __clk_recalc_rates(child, msg); 1158 } 1159 1160 static unsigned long clk_core_get_rate(struct clk_core *core) 1161 { 1162 unsigned long rate; 1163 1164 clk_prepare_lock(); 1165 1166 if (core && (core->flags & CLK_GET_RATE_NOCACHE)) 1167 __clk_recalc_rates(core, 0); 1168 1169 rate = clk_core_get_rate_nolock(core); 1170 clk_prepare_unlock(); 1171 1172 return rate; 1173 } 1174 1175 /** 1176 * clk_get_rate - return the rate of clk 1177 * @clk: the clk whose rate is being returned 1178 * 1179 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag 1180 * is set, which means a recalc_rate will be issued. 1181 * If clk is NULL then returns 0. 1182 */ 1183 unsigned long clk_get_rate(struct clk *clk) 1184 { 1185 if (!clk) 1186 return 0; 1187 1188 return clk_core_get_rate(clk->core); 1189 } 1190 EXPORT_SYMBOL_GPL(clk_get_rate); 1191 1192 static int clk_fetch_parent_index(struct clk_core *core, 1193 struct clk_core *parent) 1194 { 1195 int i; 1196 1197 if (!parent) 1198 return -EINVAL; 1199 1200 for (i = 0; i < core->num_parents; i++) 1201 if (clk_core_get_parent_by_index(core, i) == parent) 1202 return i; 1203 1204 return -EINVAL; 1205 } 1206 1207 /* 1208 * Update the orphan status of @core and all its children. 
1209 */ 1210 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) 1211 { 1212 struct clk_core *child; 1213 1214 core->orphan = is_orphan; 1215 1216 hlist_for_each_entry(child, &core->children, child_node) 1217 clk_core_update_orphan_status(child, is_orphan); 1218 } 1219 1220 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) 1221 { 1222 bool was_orphan = core->orphan; 1223 1224 hlist_del(&core->child_node); 1225 1226 if (new_parent) { 1227 bool becomes_orphan = new_parent->orphan; 1228 1229 /* avoid duplicate POST_RATE_CHANGE notifications */ 1230 if (new_parent->new_child == core) 1231 new_parent->new_child = NULL; 1232 1233 hlist_add_head(&core->child_node, &new_parent->children); 1234 1235 if (was_orphan != becomes_orphan) 1236 clk_core_update_orphan_status(core, becomes_orphan); 1237 } else { 1238 hlist_add_head(&core->child_node, &clk_orphan_list); 1239 if (!was_orphan) 1240 clk_core_update_orphan_status(core, true); 1241 } 1242 1243 core->parent = new_parent; 1244 } 1245 1246 static struct clk_core *__clk_set_parent_before(struct clk_core *core, 1247 struct clk_core *parent) 1248 { 1249 unsigned long flags; 1250 struct clk_core *old_parent = core->parent; 1251 1252 /* 1253 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock 1254 * 1255 * 2. Migrate prepare state between parents and prevent race with 1256 * clk_enable(). 1257 * 1258 * If the clock is not prepared, then a race with 1259 * clk_enable/disable() is impossible since we already have the 1260 * prepare lock (future calls to clk_enable() need to be preceded by 1261 * a clk_prepare()). 1262 * 1263 * If the clock is prepared, migrate the prepared state to the new 1264 * parent and also protect against a race with clk_enable() by 1265 * forcing the clock and the new parent on. This ensures that all 1266 * future calls to clk_enable() are practically NOPs with respect to 1267 * hardware and software states. 1268 * 1269 * See also: Comment for clk_set_parent() below. 1270 */ 1271 1272 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */ 1273 if (core->flags & CLK_OPS_PARENT_ENABLE) { 1274 clk_core_prepare_enable(old_parent); 1275 clk_core_prepare_enable(parent); 1276 } 1277 1278 /* migrate prepare count if > 0 */ 1279 if (core->prepare_count) { 1280 clk_core_prepare_enable(parent); 1281 clk_core_enable_lock(core); 1282 } 1283 1284 /* update the clk tree topology */ 1285 flags = clk_enable_lock(); 1286 clk_reparent(core, parent); 1287 clk_enable_unlock(flags); 1288 1289 return old_parent; 1290 } 1291 1292 static void __clk_set_parent_after(struct clk_core *core, 1293 struct clk_core *parent, 1294 struct clk_core *old_parent) 1295 { 1296 /* 1297 * Finish the migration of prepare state and undo the changes done 1298 * for preventing a race with clk_enable(). 
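	 *
	 * Paired with __clk_set_parent_before(), the overall reparent
	 * sequence is roughly (sketch; CLK_OPS_PARENT_ENABLE handling
	 * omitted):
	 *
	 *	clk_core_prepare_enable(parent);	(only if core is prepared)
	 *	clk_core_enable_lock(core);
	 *	clk_reparent(core, parent);		(under the enable lock)
	 *	core->ops->set_parent(...);		(done by the caller)
	 *	clk_core_disable_lock(core);
	 *	clk_core_disable_unprepare(old_parent);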
1299 */ 1300 if (core->prepare_count) { 1301 clk_core_disable_lock(core); 1302 clk_core_disable_unprepare(old_parent); 1303 } 1304 1305 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */ 1306 if (core->flags & CLK_OPS_PARENT_ENABLE) { 1307 clk_core_disable_unprepare(parent); 1308 clk_core_disable_unprepare(old_parent); 1309 } 1310 } 1311 1312 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, 1313 u8 p_index) 1314 { 1315 unsigned long flags; 1316 int ret = 0; 1317 struct clk_core *old_parent; 1318 1319 old_parent = __clk_set_parent_before(core, parent); 1320 1321 trace_clk_set_parent(core, parent); 1322 1323 /* change clock input source */ 1324 if (parent && core->ops->set_parent) 1325 ret = core->ops->set_parent(core->hw, p_index); 1326 1327 trace_clk_set_parent_complete(core, parent); 1328 1329 if (ret) { 1330 flags = clk_enable_lock(); 1331 clk_reparent(core, old_parent); 1332 clk_enable_unlock(flags); 1333 __clk_set_parent_after(core, old_parent, parent); 1334 1335 return ret; 1336 } 1337 1338 __clk_set_parent_after(core, parent, old_parent); 1339 1340 return 0; 1341 } 1342 1343 /** 1344 * __clk_speculate_rates 1345 * @core: first clk in the subtree 1346 * @parent_rate: the "future" rate of clk's parent 1347 * 1348 * Walks the subtree of clks starting with clk, speculating rates as it 1349 * goes and firing off PRE_RATE_CHANGE notifications as necessary. 1350 * 1351 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending 1352 * pre-rate change notifications and returns early if no clks in the 1353 * subtree have subscribed to the notifications. Note that if a clk does not 1354 * implement the .recalc_rate callback then it is assumed that the clock will 1355 * take on the rate of its parent. 1356 */ 1357 static int __clk_speculate_rates(struct clk_core *core, 1358 unsigned long parent_rate) 1359 { 1360 struct clk_core *child; 1361 unsigned long new_rate; 1362 int ret = NOTIFY_DONE; 1363 1364 lockdep_assert_held(&prepare_lock); 1365 1366 new_rate = clk_recalc(core, parent_rate); 1367 1368 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ 1369 if (core->notifier_count) 1370 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); 1371 1372 if (ret & NOTIFY_STOP_MASK) { 1373 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", 1374 __func__, core->name, ret); 1375 goto out; 1376 } 1377 1378 hlist_for_each_entry(child, &core->children, child_node) { 1379 ret = __clk_speculate_rates(child, new_rate); 1380 if (ret & NOTIFY_STOP_MASK) 1381 break; 1382 } 1383 1384 out: 1385 return ret; 1386 } 1387 1388 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, 1389 struct clk_core *new_parent, u8 p_index) 1390 { 1391 struct clk_core *child; 1392 1393 core->new_rate = new_rate; 1394 core->new_parent = new_parent; 1395 core->new_parent_index = p_index; 1396 /* include clk in new parent's PRE_RATE_CHANGE notifications */ 1397 core->new_child = NULL; 1398 if (new_parent && new_parent != core->parent) 1399 new_parent->new_child = core; 1400 1401 hlist_for_each_entry(child, &core->children, child_node) { 1402 child->new_rate = clk_recalc(child, new_rate); 1403 clk_calc_subtree(child, child->new_rate, NULL, 0); 1404 } 1405 } 1406 1407 /* 1408 * calculate the new rates returning the topmost clock that has to be 1409 * changed. 
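 *
 * For example, in a hypothetical chain osc -> pll -> div where only "div"
 * carries CLK_SET_RATE_PARENT, a rate request on "div" may return "pll" as
 * the topmost clock: the divider asks its parent for a new rate, but the
 * PLL does not propagate the request any further up.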
1410 */ 1411 static struct clk_core *clk_calc_new_rates(struct clk_core *core, 1412 unsigned long rate) 1413 { 1414 struct clk_core *top = core; 1415 struct clk_core *old_parent, *parent; 1416 unsigned long best_parent_rate = 0; 1417 unsigned long new_rate; 1418 unsigned long min_rate; 1419 unsigned long max_rate; 1420 int p_index = 0; 1421 long ret; 1422 1423 /* sanity */ 1424 if (IS_ERR_OR_NULL(core)) 1425 return NULL; 1426 1427 /* save parent rate, if it exists */ 1428 parent = old_parent = core->parent; 1429 if (parent) 1430 best_parent_rate = parent->rate; 1431 1432 clk_core_get_boundaries(core, &min_rate, &max_rate); 1433 1434 /* find the closest rate and parent clk/rate */ 1435 if (core->ops->determine_rate) { 1436 struct clk_rate_request req; 1437 1438 req.rate = rate; 1439 req.min_rate = min_rate; 1440 req.max_rate = max_rate; 1441 if (parent) { 1442 req.best_parent_hw = parent->hw; 1443 req.best_parent_rate = parent->rate; 1444 } else { 1445 req.best_parent_hw = NULL; 1446 req.best_parent_rate = 0; 1447 } 1448 1449 ret = core->ops->determine_rate(core->hw, &req); 1450 if (ret < 0) 1451 return NULL; 1452 1453 best_parent_rate = req.best_parent_rate; 1454 new_rate = req.rate; 1455 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL; 1456 } else if (core->ops->round_rate) { 1457 ret = core->ops->round_rate(core->hw, rate, 1458 &best_parent_rate); 1459 if (ret < 0) 1460 return NULL; 1461 1462 new_rate = ret; 1463 if (new_rate < min_rate || new_rate > max_rate) 1464 return NULL; 1465 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { 1466 /* pass-through clock without adjustable parent */ 1467 core->new_rate = core->rate; 1468 return NULL; 1469 } else { 1470 /* pass-through clock with adjustable parent */ 1471 top = clk_calc_new_rates(parent, rate); 1472 new_rate = parent->new_rate; 1473 goto out; 1474 } 1475 1476 /* some clocks must be gated to change parent */ 1477 if (parent != old_parent && 1478 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { 1479 pr_debug("%s: %s not gated but wants to reparent\n", 1480 __func__, core->name); 1481 return NULL; 1482 } 1483 1484 /* try finding the new parent index */ 1485 if (parent && core->num_parents > 1) { 1486 p_index = clk_fetch_parent_index(core, parent); 1487 if (p_index < 0) { 1488 pr_debug("%s: clk %s can not be parent of clk %s\n", 1489 __func__, parent->name, core->name); 1490 return NULL; 1491 } 1492 } 1493 1494 if ((core->flags & CLK_SET_RATE_PARENT) && parent && 1495 best_parent_rate != parent->rate) 1496 top = clk_calc_new_rates(parent, best_parent_rate); 1497 1498 out: 1499 clk_calc_subtree(core, new_rate, parent, p_index); 1500 1501 return top; 1502 } 1503 1504 /* 1505 * Notify about rate changes in a subtree. Always walk down the whole tree 1506 * so that in case of an error we can walk down the whole tree again and 1507 * abort the change. 
1508 */ 1509 static struct clk_core *clk_propagate_rate_change(struct clk_core *core, 1510 unsigned long event) 1511 { 1512 struct clk_core *child, *tmp_clk, *fail_clk = NULL; 1513 int ret = NOTIFY_DONE; 1514 1515 if (core->rate == core->new_rate) 1516 return NULL; 1517 1518 if (core->notifier_count) { 1519 ret = __clk_notify(core, event, core->rate, core->new_rate); 1520 if (ret & NOTIFY_STOP_MASK) 1521 fail_clk = core; 1522 } 1523 1524 hlist_for_each_entry(child, &core->children, child_node) { 1525 /* Skip children who will be reparented to another clock */ 1526 if (child->new_parent && child->new_parent != core) 1527 continue; 1528 tmp_clk = clk_propagate_rate_change(child, event); 1529 if (tmp_clk) 1530 fail_clk = tmp_clk; 1531 } 1532 1533 /* handle the new child who might not be in core->children yet */ 1534 if (core->new_child) { 1535 tmp_clk = clk_propagate_rate_change(core->new_child, event); 1536 if (tmp_clk) 1537 fail_clk = tmp_clk; 1538 } 1539 1540 return fail_clk; 1541 } 1542 1543 /* 1544 * walk down a subtree and set the new rates notifying the rate 1545 * change on the way 1546 */ 1547 static void clk_change_rate(struct clk_core *core) 1548 { 1549 struct clk_core *child; 1550 struct hlist_node *tmp; 1551 unsigned long old_rate; 1552 unsigned long best_parent_rate = 0; 1553 bool skip_set_rate = false; 1554 struct clk_core *old_parent; 1555 struct clk_core *parent = NULL; 1556 1557 old_rate = core->rate; 1558 1559 if (core->new_parent) { 1560 parent = core->new_parent; 1561 best_parent_rate = core->new_parent->rate; 1562 } else if (core->parent) { 1563 parent = core->parent; 1564 best_parent_rate = core->parent->rate; 1565 } 1566 1567 if (core->flags & CLK_SET_RATE_UNGATE) { 1568 unsigned long flags; 1569 1570 clk_core_prepare(core); 1571 flags = clk_enable_lock(); 1572 clk_core_enable(core); 1573 clk_enable_unlock(flags); 1574 } 1575 1576 if (core->new_parent && core->new_parent != core->parent) { 1577 old_parent = __clk_set_parent_before(core, core->new_parent); 1578 trace_clk_set_parent(core, core->new_parent); 1579 1580 if (core->ops->set_rate_and_parent) { 1581 skip_set_rate = true; 1582 core->ops->set_rate_and_parent(core->hw, core->new_rate, 1583 best_parent_rate, 1584 core->new_parent_index); 1585 } else if (core->ops->set_parent) { 1586 core->ops->set_parent(core->hw, core->new_parent_index); 1587 } 1588 1589 trace_clk_set_parent_complete(core, core->new_parent); 1590 __clk_set_parent_after(core, core->new_parent, old_parent); 1591 } 1592 1593 if (core->flags & CLK_OPS_PARENT_ENABLE) 1594 clk_core_prepare_enable(parent); 1595 1596 trace_clk_set_rate(core, core->new_rate); 1597 1598 if (!skip_set_rate && core->ops->set_rate) 1599 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); 1600 1601 trace_clk_set_rate_complete(core, core->new_rate); 1602 1603 core->rate = clk_recalc(core, best_parent_rate); 1604 1605 if (core->flags & CLK_SET_RATE_UNGATE) { 1606 unsigned long flags; 1607 1608 flags = clk_enable_lock(); 1609 clk_core_disable(core); 1610 clk_enable_unlock(flags); 1611 clk_core_unprepare(core); 1612 } 1613 1614 if (core->flags & CLK_OPS_PARENT_ENABLE) 1615 clk_core_disable_unprepare(parent); 1616 1617 if (core->notifier_count && old_rate != core->rate) 1618 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); 1619 1620 if (core->flags & CLK_RECALC_NEW_RATES) 1621 (void)clk_calc_new_rates(core, core->new_rate); 1622 1623 /* 1624 * Use safe iteration, as change_rate can actually swap parents 1625 * for certain clock types. 
1626 */ 1627 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { 1628 /* Skip children who will be reparented to another clock */ 1629 if (child->new_parent && child->new_parent != core) 1630 continue; 1631 clk_change_rate(child); 1632 } 1633 1634 /* handle the new child who might not be in core->children yet */ 1635 if (core->new_child) 1636 clk_change_rate(core->new_child); 1637 } 1638 1639 static int clk_core_set_rate_nolock(struct clk_core *core, 1640 unsigned long req_rate) 1641 { 1642 struct clk_core *top, *fail_clk; 1643 unsigned long rate = req_rate; 1644 int ret = 0; 1645 1646 if (!core) 1647 return 0; 1648 1649 /* bail early if nothing to do */ 1650 if (rate == clk_core_get_rate_nolock(core)) 1651 return 0; 1652 1653 if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count) 1654 return -EBUSY; 1655 1656 /* calculate new rates and get the topmost changed clock */ 1657 top = clk_calc_new_rates(core, rate); 1658 if (!top) 1659 return -EINVAL; 1660 1661 ret = clk_pm_runtime_get(core); 1662 if (ret) 1663 return ret; 1664 1665 /* notify that we are about to change rates */ 1666 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); 1667 if (fail_clk) { 1668 pr_debug("%s: failed to set %s rate\n", __func__, 1669 fail_clk->name); 1670 clk_propagate_rate_change(top, ABORT_RATE_CHANGE); 1671 ret = -EBUSY; 1672 goto err; 1673 } 1674 1675 /* change the rates */ 1676 clk_change_rate(top); 1677 1678 core->req_rate = req_rate; 1679 err: 1680 clk_pm_runtime_put(core); 1681 1682 return ret; 1683 } 1684 1685 /** 1686 * clk_set_rate - specify a new rate for clk 1687 * @clk: the clk whose rate is being changed 1688 * @rate: the new rate for clk 1689 * 1690 * In the simplest case clk_set_rate will only adjust the rate of clk. 1691 * 1692 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to 1693 * propagate up to clk's parent; whether or not this happens depends on the 1694 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged 1695 * after calling .round_rate then upstream parent propagation is ignored. If 1696 * *parent_rate comes back with a new rate for clk's parent then we propagate 1697 * up to clk's parent and set its rate. Upward propagation will continue 1698 * until either a clk does not support the CLK_SET_RATE_PARENT flag or 1699 * .round_rate stops requesting changes to clk's parent_rate. 1700 * 1701 * Rate changes are accomplished via tree traversal that also recalculates the 1702 * rates for the clocks and fires off POST_RATE_CHANGE notifiers. 1703 * 1704 * Returns 0 on success, -EERROR otherwise. 1705 */ 1706 int clk_set_rate(struct clk *clk, unsigned long rate) 1707 { 1708 int ret; 1709 1710 if (!clk) 1711 return 0; 1712 1713 /* prevent racing with updates to the clock topology */ 1714 clk_prepare_lock(); 1715 1716 ret = clk_core_set_rate_nolock(clk->core, rate); 1717 1718 clk_prepare_unlock(); 1719 1720 return ret; 1721 } 1722 EXPORT_SYMBOL_GPL(clk_set_rate); 1723 1724 /** 1725 * clk_set_rate_range - set a rate range for a clock source 1726 * @clk: clock source 1727 * @min: desired minimum clock rate in Hz, inclusive 1728 * @max: desired maximum clock rate in Hz, inclusive 1729 * 1730 * Returns success (0) or negative errno. 
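 *
 * A consumer might constrain the clock before requesting a rate inside
 * that window, e.g. (hypothetical values):
 *
 *	ret = clk_set_rate_range(clk, 100000000, 400000000);
 *	if (!ret)
 *		ret = clk_set_rate(clk, 200000000);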
1731 */ 1732 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) 1733 { 1734 int ret = 0; 1735 1736 if (!clk) 1737 return 0; 1738 1739 if (min > max) { 1740 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n", 1741 __func__, clk->core->name, clk->dev_id, clk->con_id, 1742 min, max); 1743 return -EINVAL; 1744 } 1745 1746 clk_prepare_lock(); 1747 1748 if (min != clk->min_rate || max != clk->max_rate) { 1749 clk->min_rate = min; 1750 clk->max_rate = max; 1751 ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate); 1752 } 1753 1754 clk_prepare_unlock(); 1755 1756 return ret; 1757 } 1758 EXPORT_SYMBOL_GPL(clk_set_rate_range); 1759 1760 /** 1761 * clk_set_min_rate - set a minimum clock rate for a clock source 1762 * @clk: clock source 1763 * @rate: desired minimum clock rate in Hz, inclusive 1764 * 1765 * Returns success (0) or negative errno. 1766 */ 1767 int clk_set_min_rate(struct clk *clk, unsigned long rate) 1768 { 1769 if (!clk) 1770 return 0; 1771 1772 return clk_set_rate_range(clk, rate, clk->max_rate); 1773 } 1774 EXPORT_SYMBOL_GPL(clk_set_min_rate); 1775 1776 /** 1777 * clk_set_max_rate - set a maximum clock rate for a clock source 1778 * @clk: clock source 1779 * @rate: desired maximum clock rate in Hz, inclusive 1780 * 1781 * Returns success (0) or negative errno. 1782 */ 1783 int clk_set_max_rate(struct clk *clk, unsigned long rate) 1784 { 1785 if (!clk) 1786 return 0; 1787 1788 return clk_set_rate_range(clk, clk->min_rate, rate); 1789 } 1790 EXPORT_SYMBOL_GPL(clk_set_max_rate); 1791 1792 /** 1793 * clk_get_parent - return the parent of a clk 1794 * @clk: the clk whose parent gets returned 1795 * 1796 * Simply returns clk->parent. Returns NULL if clk is NULL. 1797 */ 1798 struct clk *clk_get_parent(struct clk *clk) 1799 { 1800 struct clk *parent; 1801 1802 if (!clk) 1803 return NULL; 1804 1805 clk_prepare_lock(); 1806 /* TODO: Create a per-user clk and change callers to call clk_put */ 1807 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; 1808 clk_prepare_unlock(); 1809 1810 return parent; 1811 } 1812 EXPORT_SYMBOL_GPL(clk_get_parent); 1813 1814 static struct clk_core *__clk_init_parent(struct clk_core *core) 1815 { 1816 u8 index = 0; 1817 1818 if (core->num_parents > 1 && core->ops->get_parent) 1819 index = core->ops->get_parent(core->hw); 1820 1821 return clk_core_get_parent_by_index(core, index); 1822 } 1823 1824 static void clk_core_reparent(struct clk_core *core, 1825 struct clk_core *new_parent) 1826 { 1827 clk_reparent(core, new_parent); 1828 __clk_recalc_accuracies(core); 1829 __clk_recalc_rates(core, POST_RATE_CHANGE); 1830 } 1831 1832 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) 1833 { 1834 if (!hw) 1835 return; 1836 1837 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); 1838 } 1839 1840 /** 1841 * clk_has_parent - check if a clock is a possible parent for another 1842 * @clk: clock source 1843 * @parent: parent clock source 1844 * 1845 * This function can be used in drivers that need to check that a clock can be 1846 * the parent of another without actually changing the parent. 1847 * 1848 * Returns true if @parent is a possible parent for @clk, false otherwise. 1849 */ 1850 bool clk_has_parent(struct clk *clk, struct clk *parent) 1851 { 1852 struct clk_core *core, *parent_core; 1853 unsigned int i; 1854 1855 /* NULL clocks should be nops, so return success if either is NULL. 
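	 *
	 * A typical (hypothetical) caller only reparents once this check
	 * succeeds:
	 *
	 *	if (clk_has_parent(clk, parent))
	 *		ret = clk_set_parent(clk, parent);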
	 */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)
		return true;

	for (i = 0; i < core->num_parents; i++)
		if (strcmp(core->parent_names[i], parent_core->name) == 0)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);

static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!core)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (core->parent == parent)
		goto out;

	/* verify ops for multi-parent clks */
	if ((core->num_parents > 1) && (!core->ops->set_parent)) {
		ret = -ENOSYS;
		goto out;
	}

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, core->name);
			ret = p_index;
			goto out;
		}
		p_rate = parent->rate;
	}

	ret = clk_pm_runtime_get(core);
	if (ret)
		goto out;

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto runtime_put;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

runtime_put:
	clk_pm_runtime_put(core);
out:
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source. If clk is in
 * prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, or a negative errno otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk)
		return 0;

	return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
}
EXPORT_SYMBOL_GPL(clk_set_parent);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, or a negative errno otherwise.
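 *
 * For example, a consumer that needs to sample data on a shifted clock
 * edge might request a quarter-period shift (hypothetical consumer and
 * handle):
 *
 *	ret = clk_set_phase(sample_clk, 90);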
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example, with
 * phase-locked loop clock signal generators we may shift phase with
 * respect to the feedback clock signal input, but for other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret = -EINVAL;

	if (!clk)
		return 0;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	trace_clk_set_phase(clk->core, degrees);

	if (clk->core->ops->set_phase)
		ret = clk->core->ops->set_phase(clk->core->hw, degrees);

	trace_clk_set_phase_complete(clk->core, degrees);

	if (!ret)
		clk->core->phase = degrees;

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);

static int clk_core_get_phase(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = core->phase;
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_phase(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_phase);

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if struct clk *p and struct clk *q
 * share the same struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* true if clk->core pointers match.
Avoid dereferencing garbage */ 2064 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) 2065 if (p->core == q->core) 2066 return true; 2067 2068 return false; 2069 } 2070 EXPORT_SYMBOL_GPL(clk_is_match); 2071 2072 /*** debugfs support ***/ 2073 2074 #ifdef CONFIG_DEBUG_FS 2075 #include <linux/debugfs.h> 2076 2077 static struct dentry *rootdir; 2078 static int inited = 0; 2079 static DEFINE_MUTEX(clk_debug_lock); 2080 static HLIST_HEAD(clk_debug_list); 2081 2082 static struct hlist_head *all_lists[] = { 2083 &clk_root_list, 2084 &clk_orphan_list, 2085 NULL, 2086 }; 2087 2088 static struct hlist_head *orphan_list[] = { 2089 &clk_orphan_list, 2090 NULL, 2091 }; 2092 2093 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, 2094 int level) 2095 { 2096 if (!c) 2097 return; 2098 2099 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", 2100 level * 3 + 1, "", 2101 30 - level * 3, c->name, 2102 c->enable_count, c->prepare_count, clk_core_get_rate(c), 2103 clk_core_get_accuracy(c), clk_core_get_phase(c)); 2104 } 2105 2106 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, 2107 int level) 2108 { 2109 struct clk_core *child; 2110 2111 if (!c) 2112 return; 2113 2114 clk_summary_show_one(s, c, level); 2115 2116 hlist_for_each_entry(child, &c->children, child_node) 2117 clk_summary_show_subtree(s, child, level + 1); 2118 } 2119 2120 static int clk_summary_show(struct seq_file *s, void *data) 2121 { 2122 struct clk_core *c; 2123 struct hlist_head **lists = (struct hlist_head **)s->private; 2124 2125 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); 2126 seq_puts(s, "----------------------------------------------------------------------------------------\n"); 2127 2128 clk_prepare_lock(); 2129 2130 for (; *lists; lists++) 2131 hlist_for_each_entry(c, *lists, child_node) 2132 clk_summary_show_subtree(s, c, 0); 2133 2134 clk_prepare_unlock(); 2135 2136 return 0; 2137 } 2138 2139 2140 static int clk_summary_open(struct inode *inode, struct file *file) 2141 { 2142 return single_open(file, clk_summary_show, inode->i_private); 2143 } 2144 2145 static const struct file_operations clk_summary_fops = { 2146 .open = clk_summary_open, 2147 .read = seq_read, 2148 .llseek = seq_lseek, 2149 .release = single_release, 2150 }; 2151 2152 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) 2153 { 2154 if (!c) 2155 return; 2156 2157 /* This should be JSON format, i.e. 
elements separated with a comma */ 2158 seq_printf(s, "\"%s\": { ", c->name); 2159 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 2160 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 2161 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); 2162 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); 2163 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); 2164 } 2165 2166 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) 2167 { 2168 struct clk_core *child; 2169 2170 if (!c) 2171 return; 2172 2173 clk_dump_one(s, c, level); 2174 2175 hlist_for_each_entry(child, &c->children, child_node) { 2176 seq_putc(s, ','); 2177 clk_dump_subtree(s, child, level + 1); 2178 } 2179 2180 seq_putc(s, '}'); 2181 } 2182 2183 static int clk_dump(struct seq_file *s, void *data) 2184 { 2185 struct clk_core *c; 2186 bool first_node = true; 2187 struct hlist_head **lists = (struct hlist_head **)s->private; 2188 2189 seq_putc(s, '{'); 2190 clk_prepare_lock(); 2191 2192 for (; *lists; lists++) { 2193 hlist_for_each_entry(c, *lists, child_node) { 2194 if (!first_node) 2195 seq_putc(s, ','); 2196 first_node = false; 2197 clk_dump_subtree(s, c, 0); 2198 } 2199 } 2200 2201 clk_prepare_unlock(); 2202 2203 seq_puts(s, "}\n"); 2204 return 0; 2205 } 2206 2207 2208 static int clk_dump_open(struct inode *inode, struct file *file) 2209 { 2210 return single_open(file, clk_dump, inode->i_private); 2211 } 2212 2213 static const struct file_operations clk_dump_fops = { 2214 .open = clk_dump_open, 2215 .read = seq_read, 2216 .llseek = seq_lseek, 2217 .release = single_release, 2218 }; 2219 2220 static int possible_parents_dump(struct seq_file *s, void *data) 2221 { 2222 struct clk_core *core = s->private; 2223 int i; 2224 2225 for (i = 0; i < core->num_parents - 1; i++) 2226 seq_printf(s, "%s ", core->parent_names[i]); 2227 2228 seq_printf(s, "%s\n", core->parent_names[i]); 2229 2230 return 0; 2231 } 2232 2233 static int possible_parents_open(struct inode *inode, struct file *file) 2234 { 2235 return single_open(file, possible_parents_dump, inode->i_private); 2236 } 2237 2238 static const struct file_operations possible_parents_fops = { 2239 .open = possible_parents_open, 2240 .read = seq_read, 2241 .llseek = seq_lseek, 2242 .release = single_release, 2243 }; 2244 2245 static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) 2246 { 2247 struct dentry *d; 2248 int ret = -ENOMEM; 2249 2250 if (!core || !pdentry) { 2251 ret = -EINVAL; 2252 goto out; 2253 } 2254 2255 d = debugfs_create_dir(core->name, pdentry); 2256 if (!d) 2257 goto out; 2258 2259 core->dentry = d; 2260 2261 d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry, 2262 (u32 *)&core->rate); 2263 if (!d) 2264 goto err_out; 2265 2266 d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry, 2267 (u32 *)&core->accuracy); 2268 if (!d) 2269 goto err_out; 2270 2271 d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry, 2272 (u32 *)&core->phase); 2273 if (!d) 2274 goto err_out; 2275 2276 d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry, 2277 (u32 *)&core->flags); 2278 if (!d) 2279 goto err_out; 2280 2281 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry, 2282 (u32 *)&core->prepare_count); 2283 if (!d) 2284 goto err_out; 2285 2286 d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry, 2287 (u32 *)&core->enable_count); 2288 if (!d) 2289 goto err_out; 2290 2291 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry, 2292 (u32 
*)&core->notifier_count); 2293 if (!d) 2294 goto err_out; 2295 2296 if (core->num_parents > 1) { 2297 d = debugfs_create_file("clk_possible_parents", S_IRUGO, 2298 core->dentry, core, &possible_parents_fops); 2299 if (!d) 2300 goto err_out; 2301 } 2302 2303 if (core->ops->debug_init) { 2304 ret = core->ops->debug_init(core->hw, core->dentry); 2305 if (ret) 2306 goto err_out; 2307 } 2308 2309 ret = 0; 2310 goto out; 2311 2312 err_out: 2313 debugfs_remove_recursive(core->dentry); 2314 core->dentry = NULL; 2315 out: 2316 return ret; 2317 } 2318 2319 /** 2320 * clk_debug_register - add a clk node to the debugfs clk directory 2321 * @core: the clk being added to the debugfs clk directory 2322 * 2323 * Dynamically adds a clk to the debugfs clk directory if debugfs has been 2324 * initialized. Otherwise it bails out early since the debugfs clk directory 2325 * will be created lazily by clk_debug_init as part of a late_initcall. 2326 */ 2327 static int clk_debug_register(struct clk_core *core) 2328 { 2329 int ret = 0; 2330 2331 mutex_lock(&clk_debug_lock); 2332 hlist_add_head(&core->debug_node, &clk_debug_list); 2333 2334 if (!inited) 2335 goto unlock; 2336 2337 ret = clk_debug_create_one(core, rootdir); 2338 unlock: 2339 mutex_unlock(&clk_debug_lock); 2340 2341 return ret; 2342 } 2343 2344 /** 2345 * clk_debug_unregister - remove a clk node from the debugfs clk directory 2346 * @core: the clk being removed from the debugfs clk directory 2347 * 2348 * Dynamically removes a clk and all its child nodes from the 2349 * debugfs clk directory if clk->dentry points to debugfs created by 2350 * clk_debug_register in __clk_core_init. 2351 */ 2352 static void clk_debug_unregister(struct clk_core *core) 2353 { 2354 mutex_lock(&clk_debug_lock); 2355 hlist_del_init(&core->debug_node); 2356 debugfs_remove_recursive(core->dentry); 2357 core->dentry = NULL; 2358 mutex_unlock(&clk_debug_lock); 2359 } 2360 2361 struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode, 2362 void *data, const struct file_operations *fops) 2363 { 2364 struct dentry *d = NULL; 2365 2366 if (hw->core->dentry) 2367 d = debugfs_create_file(name, mode, hw->core->dentry, data, 2368 fops); 2369 2370 return d; 2371 } 2372 EXPORT_SYMBOL_GPL(clk_debugfs_add_file); 2373 2374 /** 2375 * clk_debug_init - lazily populate the debugfs clk directory 2376 * 2377 * clks are often initialized very early during boot before memory can be 2378 * dynamically allocated and well before debugfs is setup. This function 2379 * populates the debugfs clk directory once at boot-time when we know that 2380 * debugfs is setup. It should only be called once at boot-time, all other clks 2381 * added dynamically will be done so with clk_debug_register. 
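 *
 * Once populated, the per-clock directories and the top-level summary files
 * created below can be inspected from userspace, for example (an illustrative
 * shell session, assuming debugfs is mounted at its usual location):
 *
 *	# cat /sys/kernel/debug/clk/clk_summary
 *	# cat /sys/kernel/debug/clk/clk_dump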
2382 */ 2383 static int __init clk_debug_init(void) 2384 { 2385 struct clk_core *core; 2386 struct dentry *d; 2387 2388 rootdir = debugfs_create_dir("clk", NULL); 2389 2390 if (!rootdir) 2391 return -ENOMEM; 2392 2393 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists, 2394 &clk_summary_fops); 2395 if (!d) 2396 return -ENOMEM; 2397 2398 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists, 2399 &clk_dump_fops); 2400 if (!d) 2401 return -ENOMEM; 2402 2403 d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir, 2404 &orphan_list, &clk_summary_fops); 2405 if (!d) 2406 return -ENOMEM; 2407 2408 d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir, 2409 &orphan_list, &clk_dump_fops); 2410 if (!d) 2411 return -ENOMEM; 2412 2413 mutex_lock(&clk_debug_lock); 2414 hlist_for_each_entry(core, &clk_debug_list, debug_node) 2415 clk_debug_create_one(core, rootdir); 2416 2417 inited = 1; 2418 mutex_unlock(&clk_debug_lock); 2419 2420 return 0; 2421 } 2422 late_initcall(clk_debug_init); 2423 #else 2424 static inline int clk_debug_register(struct clk_core *core) { return 0; } 2425 static inline void clk_debug_reparent(struct clk_core *core, 2426 struct clk_core *new_parent) 2427 { 2428 } 2429 static inline void clk_debug_unregister(struct clk_core *core) 2430 { 2431 } 2432 #endif 2433 2434 /** 2435 * __clk_core_init - initialize the data structures in a struct clk_core 2436 * @core: clk_core being initialized 2437 * 2438 * Initializes the lists in struct clk_core, queries the hardware for the 2439 * parent and rate and sets them both. 2440 */ 2441 static int __clk_core_init(struct clk_core *core) 2442 { 2443 int i, ret; 2444 struct clk_core *orphan; 2445 struct hlist_node *tmp2; 2446 unsigned long rate; 2447 2448 if (!core) 2449 return -EINVAL; 2450 2451 clk_prepare_lock(); 2452 2453 ret = clk_pm_runtime_get(core); 2454 if (ret) 2455 goto unlock; 2456 2457 /* check to see if a clock with this name is already registered */ 2458 if (clk_core_lookup(core->name)) { 2459 pr_debug("%s: clk %s already initialized\n", 2460 __func__, core->name); 2461 ret = -EEXIST; 2462 goto out; 2463 } 2464 2465 /* check that clk_ops are sane. See Documentation/clk.txt */ 2466 if (core->ops->set_rate && 2467 !((core->ops->round_rate || core->ops->determine_rate) && 2468 core->ops->recalc_rate)) { 2469 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", 2470 __func__, core->name); 2471 ret = -EINVAL; 2472 goto out; 2473 } 2474 2475 if (core->ops->set_parent && !core->ops->get_parent) { 2476 pr_err("%s: %s must implement .get_parent & .set_parent\n", 2477 __func__, core->name); 2478 ret = -EINVAL; 2479 goto out; 2480 } 2481 2482 if (core->num_parents > 1 && !core->ops->get_parent) { 2483 pr_err("%s: %s must implement .get_parent as it has multi parents\n", 2484 __func__, core->name); 2485 ret = -EINVAL; 2486 goto out; 2487 } 2488 2489 if (core->ops->set_rate_and_parent && 2490 !(core->ops->set_parent && core->ops->set_rate)) { 2491 pr_err("%s: %s must implement .set_parent & .set_rate\n", 2492 __func__, core->name); 2493 ret = -EINVAL; 2494 goto out; 2495 } 2496 2497 /* throw a WARN if any entries in parent_names are NULL */ 2498 for (i = 0; i < core->num_parents; i++) 2499 WARN(!core->parent_names[i], 2500 "%s: invalid NULL in %s's .parent_names\n", 2501 __func__, core->name); 2502 2503 core->parent = __clk_init_parent(core); 2504 2505 /* 2506 * Populate core->parent if parent has already been clk_core_init'd. 
If 2507 * parent has not yet been clk_core_init'd then place clk in the orphan 2508 * list. If clk doesn't have any parents then place it in the root 2509 * clk list. 2510 * 2511 * Every time a new clk is clk_init'd then we walk the list of orphan 2512 * clocks and re-parent any that are children of the clock currently 2513 * being clk_init'd. 2514 */ 2515 if (core->parent) { 2516 hlist_add_head(&core->child_node, 2517 &core->parent->children); 2518 core->orphan = core->parent->orphan; 2519 } else if (!core->num_parents) { 2520 hlist_add_head(&core->child_node, &clk_root_list); 2521 core->orphan = false; 2522 } else { 2523 hlist_add_head(&core->child_node, &clk_orphan_list); 2524 core->orphan = true; 2525 } 2526 2527 /* 2528 * Set clk's accuracy. The preferred method is to use 2529 * .recalc_accuracy. For simple clocks and lazy developers the default 2530 * fallback is to use the parent's accuracy. If a clock doesn't have a 2531 * parent (or is orphaned) then accuracy is set to zero (perfect 2532 * clock). 2533 */ 2534 if (core->ops->recalc_accuracy) 2535 core->accuracy = core->ops->recalc_accuracy(core->hw, 2536 __clk_get_accuracy(core->parent)); 2537 else if (core->parent) 2538 core->accuracy = core->parent->accuracy; 2539 else 2540 core->accuracy = 0; 2541 2542 /* 2543 * Set clk's phase. 2544 * Since a phase is by definition relative to its parent, just 2545 * query the current clock phase, or just assume it's in phase. 2546 */ 2547 if (core->ops->get_phase) 2548 core->phase = core->ops->get_phase(core->hw); 2549 else 2550 core->phase = 0; 2551 2552 /* 2553 * Set clk's rate. The preferred method is to use .recalc_rate. For 2554 * simple clocks and lazy developers the default fallback is to use the 2555 * parent's rate. If a clock doesn't have a parent (or is orphaned) 2556 * then rate is set to zero. 2557 */ 2558 if (core->ops->recalc_rate) 2559 rate = core->ops->recalc_rate(core->hw, 2560 clk_core_get_rate_nolock(core->parent)); 2561 else if (core->parent) 2562 rate = core->parent->rate; 2563 else 2564 rate = 0; 2565 core->rate = core->req_rate = rate; 2566 2567 /* 2568 * walk the list of orphan clocks and reparent any that newly finds a 2569 * parent. 2570 */ 2571 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 2572 struct clk_core *parent = __clk_init_parent(orphan); 2573 2574 /* 2575 * we could call __clk_set_parent, but that would result in a 2576 * redundant call to the .set_rate op, if it exists 2577 */ 2578 if (parent) { 2579 __clk_set_parent_before(orphan, parent); 2580 __clk_set_parent_after(orphan, parent, NULL); 2581 __clk_recalc_accuracies(orphan); 2582 __clk_recalc_rates(orphan, 0); 2583 } 2584 } 2585 2586 /* 2587 * optional platform-specific magic 2588 * 2589 * The .init callback is not used by any of the basic clock types, but 2590 * exists for weird hardware that must perform initialization magic. 2591 * Please consider other ways of solving initialization problems before 2592 * using this callback, as its use is discouraged. 
2593 */ 2594 if (core->ops->init) 2595 core->ops->init(core->hw); 2596 2597 if (core->flags & CLK_IS_CRITICAL) { 2598 unsigned long flags; 2599 2600 clk_core_prepare(core); 2601 2602 flags = clk_enable_lock(); 2603 clk_core_enable(core); 2604 clk_enable_unlock(flags); 2605 } 2606 2607 kref_init(&core->ref); 2608 out: 2609 clk_pm_runtime_put(core); 2610 unlock: 2611 clk_prepare_unlock(); 2612 2613 if (!ret) 2614 clk_debug_register(core); 2615 2616 return ret; 2617 } 2618 2619 struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, 2620 const char *con_id) 2621 { 2622 struct clk *clk; 2623 2624 /* This is to allow this function to be chained to others */ 2625 if (IS_ERR_OR_NULL(hw)) 2626 return ERR_CAST(hw); 2627 2628 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 2629 if (!clk) 2630 return ERR_PTR(-ENOMEM); 2631 2632 clk->core = hw->core; 2633 clk->dev_id = dev_id; 2634 clk->con_id = kstrdup_const(con_id, GFP_KERNEL); 2635 clk->max_rate = ULONG_MAX; 2636 2637 clk_prepare_lock(); 2638 hlist_add_head(&clk->clks_node, &hw->core->clks); 2639 clk_prepare_unlock(); 2640 2641 return clk; 2642 } 2643 2644 void __clk_free_clk(struct clk *clk) 2645 { 2646 clk_prepare_lock(); 2647 hlist_del(&clk->clks_node); 2648 clk_prepare_unlock(); 2649 2650 kfree_const(clk->con_id); 2651 kfree(clk); 2652 } 2653 2654 /** 2655 * clk_register - allocate a new clock, register it and return an opaque cookie 2656 * @dev: device that is registering this clock 2657 * @hw: link to hardware-specific clock data 2658 * 2659 * clk_register is the primary interface for populating the clock tree with new 2660 * clock nodes. It returns a pointer to the newly allocated struct clk which 2661 * cannot be dereferenced by driver code but may be used in conjunction with the 2662 * rest of the clock API. In the event of an error clk_register will return an 2663 * error code; drivers must test for an error code after calling clk_register. 2664 */ 2665 struct clk *clk_register(struct device *dev, struct clk_hw *hw) 2666 { 2667 int i, ret; 2668 struct clk_core *core; 2669 2670 core = kzalloc(sizeof(*core), GFP_KERNEL); 2671 if (!core) { 2672 ret = -ENOMEM; 2673 goto fail_out; 2674 } 2675 2676 core->name = kstrdup_const(hw->init->name, GFP_KERNEL); 2677 if (!core->name) { 2678 ret = -ENOMEM; 2679 goto fail_name; 2680 } 2681 core->ops = hw->init->ops; 2682 if (dev && pm_runtime_enabled(dev)) 2683 core->dev = dev; 2684 if (dev && dev->driver) 2685 core->owner = dev->driver->owner; 2686 core->hw = hw; 2687 core->flags = hw->init->flags; 2688 core->num_parents = hw->init->num_parents; 2689 core->min_rate = 0; 2690 core->max_rate = ULONG_MAX; 2691 hw->core = core; 2692 2693 /* allocate local copy in case parent_names is __initdata */ 2694 core->parent_names = kcalloc(core->num_parents, sizeof(char *), 2695 GFP_KERNEL); 2696 2697 if (!core->parent_names) { 2698 ret = -ENOMEM; 2699 goto fail_parent_names; 2700 } 2701 2702 2703 /* copy each string name in case parent_names is __initdata */ 2704 for (i = 0; i < core->num_parents; i++) { 2705 core->parent_names[i] = kstrdup_const(hw->init->parent_names[i], 2706 GFP_KERNEL); 2707 if (!core->parent_names[i]) { 2708 ret = -ENOMEM; 2709 goto fail_parent_names_copy; 2710 } 2711 } 2712 2713 /* avoid unnecessary string look-ups of clk_core's possible parents. 
*/ 2714 core->parents = kcalloc(core->num_parents, sizeof(*core->parents), 2715 GFP_KERNEL); 2716 if (!core->parents) { 2717 ret = -ENOMEM; 2718 goto fail_parents; 2719 }; 2720 2721 INIT_HLIST_HEAD(&core->clks); 2722 2723 hw->clk = __clk_create_clk(hw, NULL, NULL); 2724 if (IS_ERR(hw->clk)) { 2725 ret = PTR_ERR(hw->clk); 2726 goto fail_parents; 2727 } 2728 2729 ret = __clk_core_init(core); 2730 if (!ret) 2731 return hw->clk; 2732 2733 __clk_free_clk(hw->clk); 2734 hw->clk = NULL; 2735 2736 fail_parents: 2737 kfree(core->parents); 2738 fail_parent_names_copy: 2739 while (--i >= 0) 2740 kfree_const(core->parent_names[i]); 2741 kfree(core->parent_names); 2742 fail_parent_names: 2743 kfree_const(core->name); 2744 fail_name: 2745 kfree(core); 2746 fail_out: 2747 return ERR_PTR(ret); 2748 } 2749 EXPORT_SYMBOL_GPL(clk_register); 2750 2751 /** 2752 * clk_hw_register - register a clk_hw and return an error code 2753 * @dev: device that is registering this clock 2754 * @hw: link to hardware-specific clock data 2755 * 2756 * clk_hw_register is the primary interface for populating the clock tree with 2757 * new clock nodes. It returns an integer equal to zero indicating success or 2758 * less than zero indicating failure. Drivers must test for an error code after 2759 * calling clk_hw_register(). 2760 */ 2761 int clk_hw_register(struct device *dev, struct clk_hw *hw) 2762 { 2763 return PTR_ERR_OR_ZERO(clk_register(dev, hw)); 2764 } 2765 EXPORT_SYMBOL_GPL(clk_hw_register); 2766 2767 /* Free memory allocated for a clock. */ 2768 static void __clk_release(struct kref *ref) 2769 { 2770 struct clk_core *core = container_of(ref, struct clk_core, ref); 2771 int i = core->num_parents; 2772 2773 lockdep_assert_held(&prepare_lock); 2774 2775 kfree(core->parents); 2776 while (--i >= 0) 2777 kfree_const(core->parent_names[i]); 2778 2779 kfree(core->parent_names); 2780 kfree_const(core->name); 2781 kfree(core); 2782 } 2783 2784 /* 2785 * Empty clk_ops for unregistered clocks. These are used temporarily 2786 * after clk_unregister() was called on a clock and until last clock 2787 * consumer calls clk_put() and the struct clk object is freed. 2788 */ 2789 static int clk_nodrv_prepare_enable(struct clk_hw *hw) 2790 { 2791 return -ENXIO; 2792 } 2793 2794 static void clk_nodrv_disable_unprepare(struct clk_hw *hw) 2795 { 2796 WARN_ON_ONCE(1); 2797 } 2798 2799 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, 2800 unsigned long parent_rate) 2801 { 2802 return -ENXIO; 2803 } 2804 2805 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) 2806 { 2807 return -ENXIO; 2808 } 2809 2810 static const struct clk_ops clk_nodrv_ops = { 2811 .enable = clk_nodrv_prepare_enable, 2812 .disable = clk_nodrv_disable_unprepare, 2813 .prepare = clk_nodrv_prepare_enable, 2814 .unprepare = clk_nodrv_disable_unprepare, 2815 .set_rate = clk_nodrv_set_rate, 2816 .set_parent = clk_nodrv_set_parent, 2817 }; 2818 2819 /** 2820 * clk_unregister - unregister a currently registered clock 2821 * @clk: clock to unregister 2822 */ 2823 void clk_unregister(struct clk *clk) 2824 { 2825 unsigned long flags; 2826 2827 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 2828 return; 2829 2830 clk_debug_unregister(clk->core); 2831 2832 clk_prepare_lock(); 2833 2834 if (clk->core->ops == &clk_nodrv_ops) { 2835 pr_err("%s: unregistered clock: %s\n", __func__, 2836 clk->core->name); 2837 goto unlock; 2838 } 2839 /* 2840 * Assign empty clock ops for consumers that might still hold 2841 * a reference to this clock. 
2842 */ 2843 flags = clk_enable_lock(); 2844 clk->core->ops = &clk_nodrv_ops; 2845 clk_enable_unlock(flags); 2846 2847 if (!hlist_empty(&clk->core->children)) { 2848 struct clk_core *child; 2849 struct hlist_node *t; 2850 2851 /* Reparent all children to the orphan list. */ 2852 hlist_for_each_entry_safe(child, t, &clk->core->children, 2853 child_node) 2854 clk_core_set_parent(child, NULL); 2855 } 2856 2857 hlist_del_init(&clk->core->child_node); 2858 2859 if (clk->core->prepare_count) 2860 pr_warn("%s: unregistering prepared clock: %s\n", 2861 __func__, clk->core->name); 2862 kref_put(&clk->core->ref, __clk_release); 2863 unlock: 2864 clk_prepare_unlock(); 2865 } 2866 EXPORT_SYMBOL_GPL(clk_unregister); 2867 2868 /** 2869 * clk_hw_unregister - unregister a currently registered clk_hw 2870 * @hw: hardware-specific clock data to unregister 2871 */ 2872 void clk_hw_unregister(struct clk_hw *hw) 2873 { 2874 clk_unregister(hw->clk); 2875 } 2876 EXPORT_SYMBOL_GPL(clk_hw_unregister); 2877 2878 static void devm_clk_release(struct device *dev, void *res) 2879 { 2880 clk_unregister(*(struct clk **)res); 2881 } 2882 2883 static void devm_clk_hw_release(struct device *dev, void *res) 2884 { 2885 clk_hw_unregister(*(struct clk_hw **)res); 2886 } 2887 2888 /** 2889 * devm_clk_register - resource managed clk_register() 2890 * @dev: device that is registering this clock 2891 * @hw: link to hardware-specific clock data 2892 * 2893 * Managed clk_register(). Clocks returned from this function are 2894 * automatically clk_unregister()ed on driver detach. See clk_register() for 2895 * more information. 2896 */ 2897 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 2898 { 2899 struct clk *clk; 2900 struct clk **clkp; 2901 2902 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); 2903 if (!clkp) 2904 return ERR_PTR(-ENOMEM); 2905 2906 clk = clk_register(dev, hw); 2907 if (!IS_ERR(clk)) { 2908 *clkp = clk; 2909 devres_add(dev, clkp); 2910 } else { 2911 devres_free(clkp); 2912 } 2913 2914 return clk; 2915 } 2916 EXPORT_SYMBOL_GPL(devm_clk_register); 2917 2918 /** 2919 * devm_clk_hw_register - resource managed clk_hw_register() 2920 * @dev: device that is registering this clock 2921 * @hw: link to hardware-specific clock data 2922 * 2923 * Managed clk_hw_register(). Clocks registered by this function are 2924 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() 2925 * for more information. 2926 */ 2927 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) 2928 { 2929 struct clk_hw **hwp; 2930 int ret; 2931 2932 hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL); 2933 if (!hwp) 2934 return -ENOMEM; 2935 2936 ret = clk_hw_register(dev, hw); 2937 if (!ret) { 2938 *hwp = hw; 2939 devres_add(dev, hwp); 2940 } else { 2941 devres_free(hwp); 2942 } 2943 2944 return ret; 2945 } 2946 EXPORT_SYMBOL_GPL(devm_clk_hw_register); 2947 2948 static int devm_clk_match(struct device *dev, void *res, void *data) 2949 { 2950 struct clk *c = res; 2951 if (WARN_ON(!c)) 2952 return 0; 2953 return c == data; 2954 } 2955 2956 static int devm_clk_hw_match(struct device *dev, void *res, void *data) 2957 { 2958 struct clk_hw *hw = res; 2959 2960 if (WARN_ON(!hw)) 2961 return 0; 2962 return hw == data; 2963 } 2964 2965 /** 2966 * devm_clk_unregister - resource managed clk_unregister() 2967 * @clk: clock to unregister 2968 * 2969 * Deallocate a clock allocated with devm_clk_register(). 
Normally 2970 * this function will not need to be called and the resource management 2971 * code will ensure that the resource is freed. 2972 */ 2973 void devm_clk_unregister(struct device *dev, struct clk *clk) 2974 { 2975 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk)); 2976 } 2977 EXPORT_SYMBOL_GPL(devm_clk_unregister); 2978 2979 /** 2980 * devm_clk_hw_unregister - resource managed clk_hw_unregister() 2981 * @dev: device that is unregistering the hardware-specific clock data 2982 * @hw: link to hardware-specific clock data 2983 * 2984 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally 2985 * this function will not need to be called and the resource management 2986 * code will ensure that the resource is freed. 2987 */ 2988 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw) 2989 { 2990 WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match, 2991 hw)); 2992 } 2993 EXPORT_SYMBOL_GPL(devm_clk_hw_unregister); 2994 2995 /* 2996 * clkdev helpers 2997 */ 2998 int __clk_get(struct clk *clk) 2999 { 3000 struct clk_core *core = !clk ? NULL : clk->core; 3001 3002 if (core) { 3003 if (!try_module_get(core->owner)) 3004 return 0; 3005 3006 kref_get(&core->ref); 3007 } 3008 return 1; 3009 } 3010 3011 void __clk_put(struct clk *clk) 3012 { 3013 struct module *owner; 3014 3015 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 3016 return; 3017 3018 clk_prepare_lock(); 3019 3020 hlist_del(&clk->clks_node); 3021 if (clk->min_rate > clk->core->req_rate || 3022 clk->max_rate < clk->core->req_rate) 3023 clk_core_set_rate_nolock(clk->core, clk->core->req_rate); 3024 3025 owner = clk->core->owner; 3026 kref_put(&clk->core->ref, __clk_release); 3027 3028 clk_prepare_unlock(); 3029 3030 module_put(owner); 3031 3032 kfree(clk); 3033 } 3034 3035 /*** clk rate change notifiers ***/ 3036 3037 /** 3038 * clk_notifier_register - add a clk rate change notifier 3039 * @clk: struct clk * to watch 3040 * @nb: struct notifier_block * with callback info 3041 * 3042 * Request notification when clk's rate changes. This uses an SRCU 3043 * notifier because we want it to block and notifier unregistrations are 3044 * uncommon. The callbacks associated with the notifier must not 3045 * re-enter into the clk framework by calling any top-level clk APIs; 3046 * this will cause a nested prepare_lock mutex. 3047 * 3048 * In all notification cases (pre, post and abort rate change) the original 3049 * clock rate is passed to the callback via struct clk_notifier_data.old_rate 3050 * and the new frequency is passed via struct clk_notifier_data.new_rate. 3051 * 3052 * clk_notifier_register() must be called from non-atomic context. 3053 * Returns -EINVAL if called with null arguments, -ENOMEM upon 3054 * allocation failure; otherwise, passes along the return value of 3055 * srcu_notifier_chain_register(). 
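 *
 * A minimal sketch of a consumer registering for rate-change notifications;
 * the "foo_*" names are illustrative and "clk" is assumed to have been
 * obtained earlier via clk_get():
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		pr_debug("clk rate %lu -> %lu\n",
 *			 ndata->old_rate, ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_clk_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_clk_nb);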
3056 */ 3057 int clk_notifier_register(struct clk *clk, struct notifier_block *nb) 3058 { 3059 struct clk_notifier *cn; 3060 int ret = -ENOMEM; 3061 3062 if (!clk || !nb) 3063 return -EINVAL; 3064 3065 clk_prepare_lock(); 3066 3067 /* search the list of notifiers for this clk */ 3068 list_for_each_entry(cn, &clk_notifier_list, node) 3069 if (cn->clk == clk) 3070 break; 3071 3072 /* if clk wasn't in the notifier list, allocate new clk_notifier */ 3073 if (cn->clk != clk) { 3074 cn = kzalloc(sizeof(*cn), GFP_KERNEL); 3075 if (!cn) 3076 goto out; 3077 3078 cn->clk = clk; 3079 srcu_init_notifier_head(&cn->notifier_head); 3080 3081 list_add(&cn->node, &clk_notifier_list); 3082 } 3083 3084 ret = srcu_notifier_chain_register(&cn->notifier_head, nb); 3085 3086 clk->core->notifier_count++; 3087 3088 out: 3089 clk_prepare_unlock(); 3090 3091 return ret; 3092 } 3093 EXPORT_SYMBOL_GPL(clk_notifier_register); 3094 3095 /** 3096 * clk_notifier_unregister - remove a clk rate change notifier 3097 * @clk: struct clk * 3098 * @nb: struct notifier_block * with callback info 3099 * 3100 * Request no further notification for changes to 'clk' and frees memory 3101 * allocated in clk_notifier_register. 3102 * 3103 * Returns -EINVAL if called with null arguments; otherwise, passes 3104 * along the return value of srcu_notifier_chain_unregister(). 3105 */ 3106 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) 3107 { 3108 struct clk_notifier *cn = NULL; 3109 int ret = -EINVAL; 3110 3111 if (!clk || !nb) 3112 return -EINVAL; 3113 3114 clk_prepare_lock(); 3115 3116 list_for_each_entry(cn, &clk_notifier_list, node) 3117 if (cn->clk == clk) 3118 break; 3119 3120 if (cn->clk == clk) { 3121 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 3122 3123 clk->core->notifier_count--; 3124 3125 /* XXX the notifier code should handle this better */ 3126 if (!cn->notifier_head.head) { 3127 srcu_cleanup_notifier_head(&cn->notifier_head); 3128 list_del(&cn->node); 3129 kfree(cn); 3130 } 3131 3132 } else { 3133 ret = -ENOENT; 3134 } 3135 3136 clk_prepare_unlock(); 3137 3138 return ret; 3139 } 3140 EXPORT_SYMBOL_GPL(clk_notifier_unregister); 3141 3142 #ifdef CONFIG_OF 3143 /** 3144 * struct of_clk_provider - Clock provider registration structure 3145 * @link: Entry in global list of clock providers 3146 * @node: Pointer to device tree node of clock provider 3147 * @get: Get clock callback. 
Returns NULL or a struct clk for the 3148 * given clock specifier 3149 * @data: context pointer to be passed into @get callback 3150 */ 3151 struct of_clk_provider { 3152 struct list_head link; 3153 3154 struct device_node *node; 3155 struct clk *(*get)(struct of_phandle_args *clkspec, void *data); 3156 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data); 3157 void *data; 3158 }; 3159 3160 static const struct of_device_id __clk_of_table_sentinel 3161 __used __section(__clk_of_table_end); 3162 3163 static LIST_HEAD(of_clk_providers); 3164 static DEFINE_MUTEX(of_clk_mutex); 3165 3166 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 3167 void *data) 3168 { 3169 return data; 3170 } 3171 EXPORT_SYMBOL_GPL(of_clk_src_simple_get); 3172 3173 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) 3174 { 3175 return data; 3176 } 3177 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get); 3178 3179 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) 3180 { 3181 struct clk_onecell_data *clk_data = data; 3182 unsigned int idx = clkspec->args[0]; 3183 3184 if (idx >= clk_data->clk_num) { 3185 pr_err("%s: invalid clock index %u\n", __func__, idx); 3186 return ERR_PTR(-EINVAL); 3187 } 3188 3189 return clk_data->clks[idx]; 3190 } 3191 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); 3192 3193 struct clk_hw * 3194 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) 3195 { 3196 struct clk_hw_onecell_data *hw_data = data; 3197 unsigned int idx = clkspec->args[0]; 3198 3199 if (idx >= hw_data->num) { 3200 pr_err("%s: invalid index %u\n", __func__, idx); 3201 return ERR_PTR(-EINVAL); 3202 } 3203 3204 return hw_data->hws[idx]; 3205 } 3206 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); 3207 3208 /** 3209 * of_clk_add_provider() - Register a clock provider for a node 3210 * @np: Device node pointer associated with clock provider 3211 * @clk_src_get: callback for decoding clock 3212 * @data: context pointer for @clk_src_get callback. 3213 */ 3214 int of_clk_add_provider(struct device_node *np, 3215 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, 3216 void *data), 3217 void *data) 3218 { 3219 struct of_clk_provider *cp; 3220 int ret; 3221 3222 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 3223 if (!cp) 3224 return -ENOMEM; 3225 3226 cp->node = of_node_get(np); 3227 cp->data = data; 3228 cp->get = clk_src_get; 3229 3230 mutex_lock(&of_clk_mutex); 3231 list_add(&cp->link, &of_clk_providers); 3232 mutex_unlock(&of_clk_mutex); 3233 pr_debug("Added clock from %pOF\n", np); 3234 3235 ret = of_clk_set_defaults(np, true); 3236 if (ret < 0) 3237 of_clk_del_provider(np); 3238 3239 return ret; 3240 } 3241 EXPORT_SYMBOL_GPL(of_clk_add_provider); 3242 3243 /** 3244 * of_clk_add_hw_provider() - Register a clock provider for a node 3245 * @np: Device node pointer associated with clock provider 3246 * @get: callback for decoding clk_hw 3247 * @data: context pointer for @get callback. 
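 *
 * A sketch of typical usage, assuming the provider has already filled in a
 * struct clk_hw_onecell_data called "hw_data" (the name is illustrative);
 * the generic of_clk_hw_onecell_get() callback defined above decodes a
 * single-cell clock specifier into an index:
 *
 *	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);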
3248 */ 3249 int of_clk_add_hw_provider(struct device_node *np, 3250 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 3251 void *data), 3252 void *data) 3253 { 3254 struct of_clk_provider *cp; 3255 int ret; 3256 3257 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 3258 if (!cp) 3259 return -ENOMEM; 3260 3261 cp->node = of_node_get(np); 3262 cp->data = data; 3263 cp->get_hw = get; 3264 3265 mutex_lock(&of_clk_mutex); 3266 list_add(&cp->link, &of_clk_providers); 3267 mutex_unlock(&of_clk_mutex); 3268 pr_debug("Added clk_hw provider from %pOF\n", np); 3269 3270 ret = of_clk_set_defaults(np, true); 3271 if (ret < 0) 3272 of_clk_del_provider(np); 3273 3274 return ret; 3275 } 3276 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider); 3277 3278 static void devm_of_clk_release_provider(struct device *dev, void *res) 3279 { 3280 of_clk_del_provider(*(struct device_node **)res); 3281 } 3282 3283 int devm_of_clk_add_hw_provider(struct device *dev, 3284 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 3285 void *data), 3286 void *data) 3287 { 3288 struct device_node **ptr, *np; 3289 int ret; 3290 3291 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), 3292 GFP_KERNEL); 3293 if (!ptr) 3294 return -ENOMEM; 3295 3296 np = dev->of_node; 3297 ret = of_clk_add_hw_provider(np, get, data); 3298 if (!ret) { 3299 *ptr = np; 3300 devres_add(dev, ptr); 3301 } else { 3302 devres_free(ptr); 3303 } 3304 3305 return ret; 3306 } 3307 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); 3308 3309 /** 3310 * of_clk_del_provider() - Remove a previously registered clock provider 3311 * @np: Device node pointer associated with clock provider 3312 */ 3313 void of_clk_del_provider(struct device_node *np) 3314 { 3315 struct of_clk_provider *cp; 3316 3317 mutex_lock(&of_clk_mutex); 3318 list_for_each_entry(cp, &of_clk_providers, link) { 3319 if (cp->node == np) { 3320 list_del(&cp->link); 3321 of_node_put(cp->node); 3322 kfree(cp); 3323 break; 3324 } 3325 } 3326 mutex_unlock(&of_clk_mutex); 3327 } 3328 EXPORT_SYMBOL_GPL(of_clk_del_provider); 3329 3330 static int devm_clk_provider_match(struct device *dev, void *res, void *data) 3331 { 3332 struct device_node **np = res; 3333 3334 if (WARN_ON(!np || !*np)) 3335 return 0; 3336 3337 return *np == data; 3338 } 3339 3340 void devm_of_clk_del_provider(struct device *dev) 3341 { 3342 int ret; 3343 3344 ret = devres_release(dev, devm_of_clk_release_provider, 3345 devm_clk_provider_match, dev->of_node); 3346 3347 WARN_ON(ret); 3348 } 3349 EXPORT_SYMBOL(devm_of_clk_del_provider); 3350 3351 static struct clk_hw * 3352 __of_clk_get_hw_from_provider(struct of_clk_provider *provider, 3353 struct of_phandle_args *clkspec) 3354 { 3355 struct clk *clk; 3356 3357 if (provider->get_hw) 3358 return provider->get_hw(clkspec, provider->data); 3359 3360 clk = provider->get(clkspec, provider->data); 3361 if (IS_ERR(clk)) 3362 return ERR_CAST(clk); 3363 return __clk_get_hw(clk); 3364 } 3365 3366 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, 3367 const char *dev_id, const char *con_id) 3368 { 3369 struct of_clk_provider *provider; 3370 struct clk *clk = ERR_PTR(-EPROBE_DEFER); 3371 struct clk_hw *hw; 3372 3373 if (!clkspec) 3374 return ERR_PTR(-EINVAL); 3375 3376 /* Check if we have such a provider in our array */ 3377 mutex_lock(&of_clk_mutex); 3378 list_for_each_entry(provider, &of_clk_providers, link) { 3379 if (provider->node == clkspec->np) { 3380 hw = __of_clk_get_hw_from_provider(provider, clkspec); 3381 clk = __clk_create_clk(hw, dev_id, con_id); 3382 } 3383 3384 if 
(!IS_ERR(clk)) { 3385 if (!__clk_get(clk)) { 3386 __clk_free_clk(clk); 3387 clk = ERR_PTR(-ENOENT); 3388 } 3389 3390 break; 3391 } 3392 } 3393 mutex_unlock(&of_clk_mutex); 3394 3395 return clk; 3396 } 3397 3398 /** 3399 * of_clk_get_from_provider() - Lookup a clock from a clock provider 3400 * @clkspec: pointer to a clock specifier data structure 3401 * 3402 * This function looks up a struct clk from the registered list of clock 3403 * providers, an input is a clock specifier data structure as returned 3404 * from the of_parse_phandle_with_args() function call. 3405 */ 3406 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 3407 { 3408 return __of_clk_get_from_provider(clkspec, NULL, __func__); 3409 } 3410 EXPORT_SYMBOL_GPL(of_clk_get_from_provider); 3411 3412 /** 3413 * of_clk_get_parent_count() - Count the number of clocks a device node has 3414 * @np: device node to count 3415 * 3416 * Returns: The number of clocks that are possible parents of this node 3417 */ 3418 unsigned int of_clk_get_parent_count(struct device_node *np) 3419 { 3420 int count; 3421 3422 count = of_count_phandle_with_args(np, "clocks", "#clock-cells"); 3423 if (count < 0) 3424 return 0; 3425 3426 return count; 3427 } 3428 EXPORT_SYMBOL_GPL(of_clk_get_parent_count); 3429 3430 const char *of_clk_get_parent_name(struct device_node *np, int index) 3431 { 3432 struct of_phandle_args clkspec; 3433 struct property *prop; 3434 const char *clk_name; 3435 const __be32 *vp; 3436 u32 pv; 3437 int rc; 3438 int count; 3439 struct clk *clk; 3440 3441 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, 3442 &clkspec); 3443 if (rc) 3444 return NULL; 3445 3446 index = clkspec.args_count ? clkspec.args[0] : 0; 3447 count = 0; 3448 3449 /* if there is an indices property, use it to transfer the index 3450 * specified into an array offset for the clock-output-names property. 3451 */ 3452 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) { 3453 if (index == pv) { 3454 index = count; 3455 break; 3456 } 3457 count++; 3458 } 3459 /* We went off the end of 'clock-indices' without finding it */ 3460 if (prop && !vp) 3461 return NULL; 3462 3463 if (of_property_read_string_index(clkspec.np, "clock-output-names", 3464 index, 3465 &clk_name) < 0) { 3466 /* 3467 * Best effort to get the name if the clock has been 3468 * registered with the framework. If the clock isn't 3469 * registered, we return the node name as the name of 3470 * the clock as long as #clock-cells = 0. 3471 */ 3472 clk = of_clk_get_from_provider(&clkspec); 3473 if (IS_ERR(clk)) { 3474 if (clkspec.args_count == 0) 3475 clk_name = clkspec.np->name; 3476 else 3477 clk_name = NULL; 3478 } else { 3479 clk_name = __clk_get_name(clk); 3480 clk_put(clk); 3481 } 3482 } 3483 3484 3485 of_node_put(clkspec.np); 3486 return clk_name; 3487 } 3488 EXPORT_SYMBOL_GPL(of_clk_get_parent_name); 3489 3490 /** 3491 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return 3492 * number of parents 3493 * @np: Device node pointer associated with clock provider 3494 * @parents: pointer to char array that hold the parents' names 3495 * @size: size of the @parents array 3496 * 3497 * Return: number of parents for the clock node. 
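 *
 * A minimal usage sketch (the array size is illustrative):
 *
 *	const char *parents[4];
 *	int num_parents;
 *
 *	num_parents = of_clk_parent_fill(np, parents, ARRAY_SIZE(parents));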
3498 */ 3499 int of_clk_parent_fill(struct device_node *np, const char **parents, 3500 unsigned int size) 3501 { 3502 unsigned int i = 0; 3503 3504 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL) 3505 i++; 3506 3507 return i; 3508 } 3509 EXPORT_SYMBOL_GPL(of_clk_parent_fill); 3510 3511 struct clock_provider { 3512 of_clk_init_cb_t clk_init_cb; 3513 struct device_node *np; 3514 struct list_head node; 3515 }; 3516 3517 /* 3518 * This function looks for a parent clock. If there is one, then it 3519 * checks that the provider for this parent clock was initialized; in 3520 * that case the parent clock will be ready. 3521 */ 3522 static int parent_ready(struct device_node *np) 3523 { 3524 int i = 0; 3525 3526 while (true) { 3527 struct clk *clk = of_clk_get(np, i); 3528 3529 /* this parent is ready, we can check the next one */ 3530 if (!IS_ERR(clk)) { 3531 clk_put(clk); 3532 i++; 3533 continue; 3534 } 3535 3536 /* at least one parent is not ready, we exit now */ 3537 if (PTR_ERR(clk) == -EPROBE_DEFER) 3538 return 0; 3539 3540 /* 3541 * Here we make the assumption that the device tree is 3542 * written correctly. So an error means that there are 3543 * no more parents. As we didn't exit yet, the 3544 * previous parents are ready. If there are no clock 3545 * parents at all, there is nothing to wait for, so we 3546 * can consider their absence as being ready 3547 */ 3548 return 1; 3549 } 3550 } 3551 3552 /** 3553 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree 3554 * @np: Device node pointer associated with clock provider 3555 * @index: clock index 3556 * @flags: pointer to clk_core->flags 3557 * 3558 * Detects if the clock-critical property exists and, if so, sets the 3559 * corresponding CLK_IS_CRITICAL flag. 3560 * 3561 * Do not use this function. It exists only for legacy Device Tree 3562 * bindings, such as the outdated one-clock-per-node style. 3563 * Those bindings typically put all clock data into .dts and the Linux 3564 * driver has no clock data, thus making it impossible to set this flag 3565 * correctly from the driver. Only those drivers may call 3566 * of_clk_detect_critical from their setup functions. 3567 * 3568 * Return: error code or zero on success 3569 */ 3570 int of_clk_detect_critical(struct device_node *np, 3571 int index, unsigned long *flags) 3572 { 3573 struct property *prop; 3574 const __be32 *cur; 3575 uint32_t idx; 3576 3577 if (!np || !flags) 3578 return -EINVAL; 3579 3580 of_property_for_each_u32(np, "clock-critical", prop, cur, idx) 3581 if (index == idx) 3582 *flags |= CLK_IS_CRITICAL; 3583 3584 return 0; 3585 } 3586 3587 /** 3588 * of_clk_init() - Scan and init clock providers from the DT 3589 * @matches: array of compatible values and init functions for providers. 3590 * 3591 * This function scans the device tree for matching clock providers 3592 * and calls their initialization functions, trying to follow the 3593 * dependencies between them.
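 *
 * Providers typically hook into this scan with the CLK_OF_DECLARE() macro,
 * which adds an entry to the __clk_of_table consulted below when @matches
 * is NULL. A sketch, with "foo" and "vendor,foo-clocks" as illustrative
 * names:
 *
 *	static void __init foo_clocks_init(struct device_node *np)
 *	{
 *		...
 *	}
 *	CLK_OF_DECLARE(foo_clocks, "vendor,foo-clocks", foo_clocks_init);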
3594 */ 3595 void __init of_clk_init(const struct of_device_id *matches) 3596 { 3597 const struct of_device_id *match; 3598 struct device_node *np; 3599 struct clock_provider *clk_provider, *next; 3600 bool is_init_done; 3601 bool force = false; 3602 LIST_HEAD(clk_provider_list); 3603 3604 if (!matches) 3605 matches = &__clk_of_table; 3606 3607 /* First prepare the list of the clocks providers */ 3608 for_each_matching_node_and_match(np, matches, &match) { 3609 struct clock_provider *parent; 3610 3611 if (!of_device_is_available(np)) 3612 continue; 3613 3614 parent = kzalloc(sizeof(*parent), GFP_KERNEL); 3615 if (!parent) { 3616 list_for_each_entry_safe(clk_provider, next, 3617 &clk_provider_list, node) { 3618 list_del(&clk_provider->node); 3619 of_node_put(clk_provider->np); 3620 kfree(clk_provider); 3621 } 3622 of_node_put(np); 3623 return; 3624 } 3625 3626 parent->clk_init_cb = match->data; 3627 parent->np = of_node_get(np); 3628 list_add_tail(&parent->node, &clk_provider_list); 3629 } 3630 3631 while (!list_empty(&clk_provider_list)) { 3632 is_init_done = false; 3633 list_for_each_entry_safe(clk_provider, next, 3634 &clk_provider_list, node) { 3635 if (force || parent_ready(clk_provider->np)) { 3636 3637 /* Don't populate platform devices */ 3638 of_node_set_flag(clk_provider->np, 3639 OF_POPULATED); 3640 3641 clk_provider->clk_init_cb(clk_provider->np); 3642 of_clk_set_defaults(clk_provider->np, true); 3643 3644 list_del(&clk_provider->node); 3645 of_node_put(clk_provider->np); 3646 kfree(clk_provider); 3647 is_init_done = true; 3648 } 3649 } 3650 3651 /* 3652 * We didn't manage to initialize any of the 3653 * remaining providers during the last loop, so now we 3654 * initialize all the remaining ones unconditionally 3655 * in case the clock parent was not mandatory 3656 */ 3657 if (!is_init_done) 3658 force = true; 3659 } 3660 } 3661 #endif 3662