1 /* 2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> 3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 * 9 * Standard functionality for the common clock API. See Documentation/clk.txt 10 */ 11 12 #include <linux/clk.h> 13 #include <linux/clk-provider.h> 14 #include <linux/clk/clk-conf.h> 15 #include <linux/module.h> 16 #include <linux/mutex.h> 17 #include <linux/spinlock.h> 18 #include <linux/err.h> 19 #include <linux/list.h> 20 #include <linux/slab.h> 21 #include <linux/of.h> 22 #include <linux/device.h> 23 #include <linux/init.h> 24 #include <linux/sched.h> 25 #include <linux/clkdev.h> 26 27 #include "clk.h" 28 29 static DEFINE_SPINLOCK(enable_lock); 30 static DEFINE_MUTEX(prepare_lock); 31 32 static struct task_struct *prepare_owner; 33 static struct task_struct *enable_owner; 34 35 static int prepare_refcnt; 36 static int enable_refcnt; 37 38 static HLIST_HEAD(clk_root_list); 39 static HLIST_HEAD(clk_orphan_list); 40 static LIST_HEAD(clk_notifier_list); 41 42 /*** private data structures ***/ 43 44 struct clk_core { 45 const char *name; 46 const struct clk_ops *ops; 47 struct clk_hw *hw; 48 struct module *owner; 49 struct clk_core *parent; 50 const char **parent_names; 51 struct clk_core **parents; 52 u8 num_parents; 53 u8 new_parent_index; 54 unsigned long rate; 55 unsigned long req_rate; 56 unsigned long new_rate; 57 struct clk_core *new_parent; 58 struct clk_core *new_child; 59 unsigned long flags; 60 bool orphan; 61 unsigned int enable_count; 62 unsigned int prepare_count; 63 unsigned long min_rate; 64 unsigned long max_rate; 65 unsigned long accuracy; 66 int phase; 67 struct hlist_head children; 68 struct hlist_node child_node; 69 struct hlist_head clks; 70 unsigned int notifier_count; 71 #ifdef CONFIG_DEBUG_FS 72 struct dentry *dentry; 73 struct hlist_node debug_node; 74 #endif 75 struct kref ref; 76 }; 77 78 #define CREATE_TRACE_POINTS 79 #include <trace/events/clk.h> 80 81 struct clk { 82 struct clk_core *core; 83 const char *dev_id; 84 const char *con_id; 85 unsigned long min_rate; 86 unsigned long max_rate; 87 struct hlist_node clks_node; 88 }; 89 90 /*** locking ***/ 91 static void clk_prepare_lock(void) 92 { 93 if (!mutex_trylock(&prepare_lock)) { 94 if (prepare_owner == current) { 95 prepare_refcnt++; 96 return; 97 } 98 mutex_lock(&prepare_lock); 99 } 100 WARN_ON_ONCE(prepare_owner != NULL); 101 WARN_ON_ONCE(prepare_refcnt != 0); 102 prepare_owner = current; 103 prepare_refcnt = 1; 104 } 105 106 static void clk_prepare_unlock(void) 107 { 108 WARN_ON_ONCE(prepare_owner != current); 109 WARN_ON_ONCE(prepare_refcnt == 0); 110 111 if (--prepare_refcnt) 112 return; 113 prepare_owner = NULL; 114 mutex_unlock(&prepare_lock); 115 } 116 117 static unsigned long clk_enable_lock(void) 118 __acquires(enable_lock) 119 { 120 unsigned long flags; 121 122 if (!spin_trylock_irqsave(&enable_lock, flags)) { 123 if (enable_owner == current) { 124 enable_refcnt++; 125 __acquire(enable_lock); 126 return flags; 127 } 128 spin_lock_irqsave(&enable_lock, flags); 129 } 130 WARN_ON_ONCE(enable_owner != NULL); 131 WARN_ON_ONCE(enable_refcnt != 0); 132 enable_owner = current; 133 enable_refcnt = 1; 134 return flags; 135 } 136 137 static void clk_enable_unlock(unsigned long flags) 138 __releases(enable_lock) 139 { 140 WARN_ON_ONCE(enable_owner != current); 141 
WARN_ON_ONCE(enable_refcnt == 0); 142 143 if (--enable_refcnt) { 144 __release(enable_lock); 145 return; 146 } 147 enable_owner = NULL; 148 spin_unlock_irqrestore(&enable_lock, flags); 149 } 150 151 static bool clk_core_is_prepared(struct clk_core *core) 152 { 153 /* 154 * .is_prepared is optional for clocks that can prepare 155 * fall back to software usage counter if it is missing 156 */ 157 if (!core->ops->is_prepared) 158 return core->prepare_count; 159 160 return core->ops->is_prepared(core->hw); 161 } 162 163 static bool clk_core_is_enabled(struct clk_core *core) 164 { 165 /* 166 * .is_enabled is only mandatory for clocks that gate 167 * fall back to software usage counter if .is_enabled is missing 168 */ 169 if (!core->ops->is_enabled) 170 return core->enable_count; 171 172 return core->ops->is_enabled(core->hw); 173 } 174 175 static void clk_unprepare_unused_subtree(struct clk_core *core) 176 { 177 struct clk_core *child; 178 179 lockdep_assert_held(&prepare_lock); 180 181 hlist_for_each_entry(child, &core->children, child_node) 182 clk_unprepare_unused_subtree(child); 183 184 if (core->prepare_count) 185 return; 186 187 if (core->flags & CLK_IGNORE_UNUSED) 188 return; 189 190 if (clk_core_is_prepared(core)) { 191 trace_clk_unprepare(core); 192 if (core->ops->unprepare_unused) 193 core->ops->unprepare_unused(core->hw); 194 else if (core->ops->unprepare) 195 core->ops->unprepare(core->hw); 196 trace_clk_unprepare_complete(core); 197 } 198 } 199 200 static void clk_disable_unused_subtree(struct clk_core *core) 201 { 202 struct clk_core *child; 203 unsigned long flags; 204 205 lockdep_assert_held(&prepare_lock); 206 207 hlist_for_each_entry(child, &core->children, child_node) 208 clk_disable_unused_subtree(child); 209 210 flags = clk_enable_lock(); 211 212 if (core->enable_count) 213 goto unlock_out; 214 215 if (core->flags & CLK_IGNORE_UNUSED) 216 goto unlock_out; 217 218 /* 219 * some gate clocks have special needs during the disable-unused 220 * sequence. call .disable_unused if available, otherwise fall 221 * back to .disable 222 */ 223 if (clk_core_is_enabled(core)) { 224 trace_clk_disable(core); 225 if (core->ops->disable_unused) 226 core->ops->disable_unused(core->hw); 227 else if (core->ops->disable) 228 core->ops->disable(core->hw); 229 trace_clk_disable_complete(core); 230 } 231 232 unlock_out: 233 clk_enable_unlock(flags); 234 } 235 236 static bool clk_ignore_unused; 237 static int __init clk_ignore_unused_setup(char *__unused) 238 { 239 clk_ignore_unused = true; 240 return 1; 241 } 242 __setup("clk_ignore_unused", clk_ignore_unused_setup); 243 244 static int clk_disable_unused(void) 245 { 246 struct clk_core *core; 247 248 if (clk_ignore_unused) { 249 pr_warn("clk: Not disabling unused clocks\n"); 250 return 0; 251 } 252 253 clk_prepare_lock(); 254 255 hlist_for_each_entry(core, &clk_root_list, child_node) 256 clk_disable_unused_subtree(core); 257 258 hlist_for_each_entry(core, &clk_orphan_list, child_node) 259 clk_disable_unused_subtree(core); 260 261 hlist_for_each_entry(core, &clk_root_list, child_node) 262 clk_unprepare_unused_subtree(core); 263 264 hlist_for_each_entry(core, &clk_orphan_list, child_node) 265 clk_unprepare_unused_subtree(core); 266 267 clk_prepare_unlock(); 268 269 return 0; 270 } 271 late_initcall_sync(clk_disable_unused); 272 273 /*** helper functions ***/ 274 275 const char *__clk_get_name(struct clk *clk) 276 { 277 return !clk ? 
NULL : clk->core->name; 278 } 279 EXPORT_SYMBOL_GPL(__clk_get_name); 280 281 const char *clk_hw_get_name(const struct clk_hw *hw) 282 { 283 return hw->core->name; 284 } 285 EXPORT_SYMBOL_GPL(clk_hw_get_name); 286 287 struct clk_hw *__clk_get_hw(struct clk *clk) 288 { 289 return !clk ? NULL : clk->core->hw; 290 } 291 EXPORT_SYMBOL_GPL(__clk_get_hw); 292 293 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw) 294 { 295 return hw->core->num_parents; 296 } 297 EXPORT_SYMBOL_GPL(clk_hw_get_num_parents); 298 299 struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw) 300 { 301 return hw->core->parent ? hw->core->parent->hw : NULL; 302 } 303 EXPORT_SYMBOL_GPL(clk_hw_get_parent); 304 305 static struct clk_core *__clk_lookup_subtree(const char *name, 306 struct clk_core *core) 307 { 308 struct clk_core *child; 309 struct clk_core *ret; 310 311 if (!strcmp(core->name, name)) 312 return core; 313 314 hlist_for_each_entry(child, &core->children, child_node) { 315 ret = __clk_lookup_subtree(name, child); 316 if (ret) 317 return ret; 318 } 319 320 return NULL; 321 } 322 323 static struct clk_core *clk_core_lookup(const char *name) 324 { 325 struct clk_core *root_clk; 326 struct clk_core *ret; 327 328 if (!name) 329 return NULL; 330 331 /* search the 'proper' clk tree first */ 332 hlist_for_each_entry(root_clk, &clk_root_list, child_node) { 333 ret = __clk_lookup_subtree(name, root_clk); 334 if (ret) 335 return ret; 336 } 337 338 /* if not found, then search the orphan tree */ 339 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { 340 ret = __clk_lookup_subtree(name, root_clk); 341 if (ret) 342 return ret; 343 } 344 345 return NULL; 346 } 347 348 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, 349 u8 index) 350 { 351 if (!core || index >= core->num_parents) 352 return NULL; 353 else if (!core->parents) 354 return clk_core_lookup(core->parent_names[index]); 355 else if (!core->parents[index]) 356 return core->parents[index] = 357 clk_core_lookup(core->parent_names[index]); 358 else 359 return core->parents[index]; 360 } 361 362 struct clk_hw * 363 clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index) 364 { 365 struct clk_core *parent; 366 367 parent = clk_core_get_parent_by_index(hw->core, index); 368 369 return !parent ? NULL : parent->hw; 370 } 371 EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index); 372 373 unsigned int __clk_get_enable_count(struct clk *clk) 374 { 375 return !clk ? 0 : clk->core->enable_count; 376 } 377 378 static unsigned long clk_core_get_rate_nolock(struct clk_core *core) 379 { 380 unsigned long ret; 381 382 if (!core) { 383 ret = 0; 384 goto out; 385 } 386 387 ret = core->rate; 388 389 if (core->flags & CLK_IS_ROOT) 390 goto out; 391 392 if (!core->parent) 393 ret = 0; 394 395 out: 396 return ret; 397 } 398 399 unsigned long clk_hw_get_rate(const struct clk_hw *hw) 400 { 401 return clk_core_get_rate_nolock(hw->core); 402 } 403 EXPORT_SYMBOL_GPL(clk_hw_get_rate); 404 405 static unsigned long __clk_get_accuracy(struct clk_core *core) 406 { 407 if (!core) 408 return 0; 409 410 return core->accuracy; 411 } 412 413 unsigned long __clk_get_flags(struct clk *clk) 414 { 415 return !clk ? 
0 : clk->core->flags; 416 } 417 EXPORT_SYMBOL_GPL(__clk_get_flags); 418 419 unsigned long clk_hw_get_flags(const struct clk_hw *hw) 420 { 421 return hw->core->flags; 422 } 423 EXPORT_SYMBOL_GPL(clk_hw_get_flags); 424 425 bool clk_hw_is_prepared(const struct clk_hw *hw) 426 { 427 return clk_core_is_prepared(hw->core); 428 } 429 430 bool __clk_is_enabled(struct clk *clk) 431 { 432 if (!clk) 433 return false; 434 435 return clk_core_is_enabled(clk->core); 436 } 437 EXPORT_SYMBOL_GPL(__clk_is_enabled); 438 439 static bool mux_is_better_rate(unsigned long rate, unsigned long now, 440 unsigned long best, unsigned long flags) 441 { 442 if (flags & CLK_MUX_ROUND_CLOSEST) 443 return abs(now - rate) < abs(best - rate); 444 445 return now <= rate && now > best; 446 } 447 448 static int 449 clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req, 450 unsigned long flags) 451 { 452 struct clk_core *core = hw->core, *parent, *best_parent = NULL; 453 int i, num_parents, ret; 454 unsigned long best = 0; 455 struct clk_rate_request parent_req = *req; 456 457 /* if NO_REPARENT flag set, pass through to current parent */ 458 if (core->flags & CLK_SET_RATE_NO_REPARENT) { 459 parent = core->parent; 460 if (core->flags & CLK_SET_RATE_PARENT) { 461 ret = __clk_determine_rate(parent ? parent->hw : NULL, 462 &parent_req); 463 if (ret) 464 return ret; 465 466 best = parent_req.rate; 467 } else if (parent) { 468 best = clk_core_get_rate_nolock(parent); 469 } else { 470 best = clk_core_get_rate_nolock(core); 471 } 472 473 goto out; 474 } 475 476 /* find the parent that can provide the fastest rate <= rate */ 477 num_parents = core->num_parents; 478 for (i = 0; i < num_parents; i++) { 479 parent = clk_core_get_parent_by_index(core, i); 480 if (!parent) 481 continue; 482 483 if (core->flags & CLK_SET_RATE_PARENT) { 484 parent_req = *req; 485 ret = __clk_determine_rate(parent->hw, &parent_req); 486 if (ret) 487 continue; 488 } else { 489 parent_req.rate = clk_core_get_rate_nolock(parent); 490 } 491 492 if (mux_is_better_rate(req->rate, parent_req.rate, 493 best, flags)) { 494 best_parent = parent; 495 best = parent_req.rate; 496 } 497 } 498 499 if (!best_parent) 500 return -EINVAL; 501 502 out: 503 if (best_parent) 504 req->best_parent_hw = best_parent->hw; 505 req->best_parent_rate = best; 506 req->rate = best; 507 508 return 0; 509 } 510 511 struct clk *__clk_lookup(const char *name) 512 { 513 struct clk_core *core = clk_core_lookup(name); 514 515 return !core ? NULL : core->hw->clk; 516 } 517 518 static void clk_core_get_boundaries(struct clk_core *core, 519 unsigned long *min_rate, 520 unsigned long *max_rate) 521 { 522 struct clk *clk_user; 523 524 *min_rate = core->min_rate; 525 *max_rate = core->max_rate; 526 527 hlist_for_each_entry(clk_user, &core->clks, clks_node) 528 *min_rate = max(*min_rate, clk_user->min_rate); 529 530 hlist_for_each_entry(clk_user, &core->clks, clks_node) 531 *max_rate = min(*max_rate, clk_user->max_rate); 532 } 533 534 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, 535 unsigned long max_rate) 536 { 537 hw->core->min_rate = min_rate; 538 hw->core->max_rate = max_rate; 539 } 540 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range); 541 542 /* 543 * Helper for finding best parent to provide a given frequency. This can be used 544 * directly as a determine_rate callback (e.g. for a mux), or from a more 545 * complex clock that may combine a mux with other operations. 
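 *
 * As an illustration (a hedged sketch, not code from this file; the "foo"
 * names are hypothetical), a basic mux provider can plug this helper
 * straight into its clk_ops:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};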
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/*** clk api ***/

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN_ON(core->prepare_count == 0))
		return;

	if (--core->prepare_count > 0)
		return;

	WARN_ON(core->enable_count > 0);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_prepare_lock();
	clk_core_unprepare(clk->core);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_core_prepare(core->parent);
		if (ret)
			return ret;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret) {
			clk_core_unprepare(core->parent);
			return ret;
		}
	}

	core->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
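 *
 * A sketch of typical consumer usage (illustrative only, not code from this
 * file; the "uart" connection id and variable names are hypothetical):
 *
 *	struct clk *clk = devm_clk_get(dev, "uart");
 *	int ret;
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare(clk);
 *	if (ret)
 *		return ret;
 *
 * followed later, possibly from atomic context, by clk_enable(). Consumers
 * that can sleep at the call site may combine both steps with
 * clk_prepare_enable().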
652 */ 653 int clk_prepare(struct clk *clk) 654 { 655 int ret; 656 657 if (!clk) 658 return 0; 659 660 clk_prepare_lock(); 661 ret = clk_core_prepare(clk->core); 662 clk_prepare_unlock(); 663 664 return ret; 665 } 666 EXPORT_SYMBOL_GPL(clk_prepare); 667 668 static void clk_core_disable(struct clk_core *core) 669 { 670 lockdep_assert_held(&enable_lock); 671 672 if (!core) 673 return; 674 675 if (WARN_ON(core->enable_count == 0)) 676 return; 677 678 if (--core->enable_count > 0) 679 return; 680 681 trace_clk_disable(core); 682 683 if (core->ops->disable) 684 core->ops->disable(core->hw); 685 686 trace_clk_disable_complete(core); 687 688 clk_core_disable(core->parent); 689 } 690 691 /** 692 * clk_disable - gate a clock 693 * @clk: the clk being gated 694 * 695 * clk_disable must not sleep, which differentiates it from clk_unprepare. In 696 * a simple case, clk_disable can be used instead of clk_unprepare to gate a 697 * clk if the operation is fast and will never sleep. One example is a 698 * SoC-internal clk which is controlled via simple register writes. In the 699 * complex case a clk gate operation may require a fast and a slow part. It is 700 * this reason that clk_unprepare and clk_disable are not mutually exclusive. 701 * In fact clk_disable must be called before clk_unprepare. 702 */ 703 void clk_disable(struct clk *clk) 704 { 705 unsigned long flags; 706 707 if (IS_ERR_OR_NULL(clk)) 708 return; 709 710 flags = clk_enable_lock(); 711 clk_core_disable(clk->core); 712 clk_enable_unlock(flags); 713 } 714 EXPORT_SYMBOL_GPL(clk_disable); 715 716 static int clk_core_enable(struct clk_core *core) 717 { 718 int ret = 0; 719 720 lockdep_assert_held(&enable_lock); 721 722 if (!core) 723 return 0; 724 725 if (WARN_ON(core->prepare_count == 0)) 726 return -ESHUTDOWN; 727 728 if (core->enable_count == 0) { 729 ret = clk_core_enable(core->parent); 730 731 if (ret) 732 return ret; 733 734 trace_clk_enable(core); 735 736 if (core->ops->enable) 737 ret = core->ops->enable(core->hw); 738 739 trace_clk_enable_complete(core); 740 741 if (ret) { 742 clk_core_disable(core->parent); 743 return ret; 744 } 745 } 746 747 core->enable_count++; 748 return 0; 749 } 750 751 /** 752 * clk_enable - ungate a clock 753 * @clk: the clk being ungated 754 * 755 * clk_enable must not sleep, which differentiates it from clk_prepare. In a 756 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk 757 * if the operation will never sleep. One example is a SoC-internal clk which 758 * is controlled via simple register writes. In the complex case a clk ungate 759 * operation may require a fast and a slow part. It is this reason that 760 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare 761 * must be called before clk_enable. Returns 0 on success, -EERROR 762 * otherwise. 
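 *
 * The matching teardown (an illustrative sketch, not code from this file)
 * mirrors the bring-up order: the atomic half is undone first, then the
 * sleepable half:
 *
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 *
 * or, equivalently, a single call to clk_disable_unprepare(clk).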
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return 0;

	flags = clk_enable_lock();
	ret = clk_core_enable(clk->core);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	struct clk_core *parent;
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}

	if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else if (core->flags & CLK_SET_RATE_PARENT) {
		return clk_core_round_rate_nolock(parent, req);
	} else {
		req->rate = core->rate;
	}

	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: rate request; carries the target rate and the minimum and maximum
 *       boundaries the returned rate must fall within
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned. If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);
	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only.
Returns NOTIFY_DONE from the last driver 897 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if 898 * a driver returns that. 899 */ 900 static int __clk_notify(struct clk_core *core, unsigned long msg, 901 unsigned long old_rate, unsigned long new_rate) 902 { 903 struct clk_notifier *cn; 904 struct clk_notifier_data cnd; 905 int ret = NOTIFY_DONE; 906 907 cnd.old_rate = old_rate; 908 cnd.new_rate = new_rate; 909 910 list_for_each_entry(cn, &clk_notifier_list, node) { 911 if (cn->clk->core == core) { 912 cnd.clk = cn->clk; 913 ret = srcu_notifier_call_chain(&cn->notifier_head, msg, 914 &cnd); 915 } 916 } 917 918 return ret; 919 } 920 921 /** 922 * __clk_recalc_accuracies 923 * @core: first clk in the subtree 924 * 925 * Walks the subtree of clks starting with clk and recalculates accuracies as 926 * it goes. Note that if a clk does not implement the .recalc_accuracy 927 * callback then it is assumed that the clock will take on the accuracy of its 928 * parent. 929 */ 930 static void __clk_recalc_accuracies(struct clk_core *core) 931 { 932 unsigned long parent_accuracy = 0; 933 struct clk_core *child; 934 935 lockdep_assert_held(&prepare_lock); 936 937 if (core->parent) 938 parent_accuracy = core->parent->accuracy; 939 940 if (core->ops->recalc_accuracy) 941 core->accuracy = core->ops->recalc_accuracy(core->hw, 942 parent_accuracy); 943 else 944 core->accuracy = parent_accuracy; 945 946 hlist_for_each_entry(child, &core->children, child_node) 947 __clk_recalc_accuracies(child); 948 } 949 950 static long clk_core_get_accuracy(struct clk_core *core) 951 { 952 unsigned long accuracy; 953 954 clk_prepare_lock(); 955 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) 956 __clk_recalc_accuracies(core); 957 958 accuracy = __clk_get_accuracy(core); 959 clk_prepare_unlock(); 960 961 return accuracy; 962 } 963 964 /** 965 * clk_get_accuracy - return the accuracy of clk 966 * @clk: the clk whose accuracy is being returned 967 * 968 * Simply returns the cached accuracy of the clk, unless 969 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be 970 * issued. 971 * If clk is NULL then returns 0. 972 */ 973 long clk_get_accuracy(struct clk *clk) 974 { 975 if (!clk) 976 return 0; 977 978 return clk_core_get_accuracy(clk->core); 979 } 980 EXPORT_SYMBOL_GPL(clk_get_accuracy); 981 982 static unsigned long clk_recalc(struct clk_core *core, 983 unsigned long parent_rate) 984 { 985 if (core->ops->recalc_rate) 986 return core->ops->recalc_rate(core->hw, parent_rate); 987 return parent_rate; 988 } 989 990 /** 991 * __clk_recalc_rates 992 * @core: first clk in the subtree 993 * @msg: notification type (see include/linux/clk.h) 994 * 995 * Walks the subtree of clks starting with clk and recalculates rates as it 996 * goes. Note that if a clk does not implement the .recalc_rate callback then 997 * it is assumed that the clock will take on the rate of its parent. 998 * 999 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, 1000 * if necessary. 
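 *
 * For context, a consumer observes these notifications through a notifier
 * registered with clk_notifier_register(). A hedged sketch (not code from
 * this file; the "foo" names are hypothetical):
 *
 *	static int foo_rate_cb(struct notifier_block *nb,
 *			       unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == POST_RATE_CHANGE)
 *			foo_reprogram_baud(cnd->new_rate);
 *
 *		return NOTIFY_OK;
 *	}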
1001 */ 1002 static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) 1003 { 1004 unsigned long old_rate; 1005 unsigned long parent_rate = 0; 1006 struct clk_core *child; 1007 1008 lockdep_assert_held(&prepare_lock); 1009 1010 old_rate = core->rate; 1011 1012 if (core->parent) 1013 parent_rate = core->parent->rate; 1014 1015 core->rate = clk_recalc(core, parent_rate); 1016 1017 /* 1018 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE 1019 * & ABORT_RATE_CHANGE notifiers 1020 */ 1021 if (core->notifier_count && msg) 1022 __clk_notify(core, msg, old_rate, core->rate); 1023 1024 hlist_for_each_entry(child, &core->children, child_node) 1025 __clk_recalc_rates(child, msg); 1026 } 1027 1028 static unsigned long clk_core_get_rate(struct clk_core *core) 1029 { 1030 unsigned long rate; 1031 1032 clk_prepare_lock(); 1033 1034 if (core && (core->flags & CLK_GET_RATE_NOCACHE)) 1035 __clk_recalc_rates(core, 0); 1036 1037 rate = clk_core_get_rate_nolock(core); 1038 clk_prepare_unlock(); 1039 1040 return rate; 1041 } 1042 1043 /** 1044 * clk_get_rate - return the rate of clk 1045 * @clk: the clk whose rate is being returned 1046 * 1047 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag 1048 * is set, which means a recalc_rate will be issued. 1049 * If clk is NULL then returns 0. 1050 */ 1051 unsigned long clk_get_rate(struct clk *clk) 1052 { 1053 if (!clk) 1054 return 0; 1055 1056 return clk_core_get_rate(clk->core); 1057 } 1058 EXPORT_SYMBOL_GPL(clk_get_rate); 1059 1060 static int clk_fetch_parent_index(struct clk_core *core, 1061 struct clk_core *parent) 1062 { 1063 int i; 1064 1065 if (!core->parents) { 1066 core->parents = kcalloc(core->num_parents, 1067 sizeof(struct clk *), GFP_KERNEL); 1068 if (!core->parents) 1069 return -ENOMEM; 1070 } 1071 1072 /* 1073 * find index of new parent clock using cached parent ptrs, 1074 * or if not yet cached, use string name comparison and cache 1075 * them now to avoid future calls to clk_core_lookup. 1076 */ 1077 for (i = 0; i < core->num_parents; i++) { 1078 if (core->parents[i] == parent) 1079 return i; 1080 1081 if (core->parents[i]) 1082 continue; 1083 1084 if (!strcmp(core->parent_names[i], parent->name)) { 1085 core->parents[i] = clk_core_lookup(parent->name); 1086 return i; 1087 } 1088 } 1089 1090 return -EINVAL; 1091 } 1092 1093 /* 1094 * Update the orphan status of @core and all its children. 
1095 */ 1096 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) 1097 { 1098 struct clk_core *child; 1099 1100 core->orphan = is_orphan; 1101 1102 hlist_for_each_entry(child, &core->children, child_node) 1103 clk_core_update_orphan_status(child, is_orphan); 1104 } 1105 1106 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) 1107 { 1108 bool was_orphan = core->orphan; 1109 1110 hlist_del(&core->child_node); 1111 1112 if (new_parent) { 1113 bool becomes_orphan = new_parent->orphan; 1114 1115 /* avoid duplicate POST_RATE_CHANGE notifications */ 1116 if (new_parent->new_child == core) 1117 new_parent->new_child = NULL; 1118 1119 hlist_add_head(&core->child_node, &new_parent->children); 1120 1121 if (was_orphan != becomes_orphan) 1122 clk_core_update_orphan_status(core, becomes_orphan); 1123 } else { 1124 hlist_add_head(&core->child_node, &clk_orphan_list); 1125 if (!was_orphan) 1126 clk_core_update_orphan_status(core, true); 1127 } 1128 1129 core->parent = new_parent; 1130 } 1131 1132 static struct clk_core *__clk_set_parent_before(struct clk_core *core, 1133 struct clk_core *parent) 1134 { 1135 unsigned long flags; 1136 struct clk_core *old_parent = core->parent; 1137 1138 /* 1139 * Migrate prepare state between parents and prevent race with 1140 * clk_enable(). 1141 * 1142 * If the clock is not prepared, then a race with 1143 * clk_enable/disable() is impossible since we already have the 1144 * prepare lock (future calls to clk_enable() need to be preceded by 1145 * a clk_prepare()). 1146 * 1147 * If the clock is prepared, migrate the prepared state to the new 1148 * parent and also protect against a race with clk_enable() by 1149 * forcing the clock and the new parent on. This ensures that all 1150 * future calls to clk_enable() are practically NOPs with respect to 1151 * hardware and software states. 1152 * 1153 * See also: Comment for clk_set_parent() below. 1154 */ 1155 if (core->prepare_count) { 1156 clk_core_prepare(parent); 1157 flags = clk_enable_lock(); 1158 clk_core_enable(parent); 1159 clk_core_enable(core); 1160 clk_enable_unlock(flags); 1161 } 1162 1163 /* update the clk tree topology */ 1164 flags = clk_enable_lock(); 1165 clk_reparent(core, parent); 1166 clk_enable_unlock(flags); 1167 1168 return old_parent; 1169 } 1170 1171 static void __clk_set_parent_after(struct clk_core *core, 1172 struct clk_core *parent, 1173 struct clk_core *old_parent) 1174 { 1175 unsigned long flags; 1176 1177 /* 1178 * Finish the migration of prepare state and undo the changes done 1179 * for preventing a race with clk_enable(). 
1180 */ 1181 if (core->prepare_count) { 1182 flags = clk_enable_lock(); 1183 clk_core_disable(core); 1184 clk_core_disable(old_parent); 1185 clk_enable_unlock(flags); 1186 clk_core_unprepare(old_parent); 1187 } 1188 } 1189 1190 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, 1191 u8 p_index) 1192 { 1193 unsigned long flags; 1194 int ret = 0; 1195 struct clk_core *old_parent; 1196 1197 old_parent = __clk_set_parent_before(core, parent); 1198 1199 trace_clk_set_parent(core, parent); 1200 1201 /* change clock input source */ 1202 if (parent && core->ops->set_parent) 1203 ret = core->ops->set_parent(core->hw, p_index); 1204 1205 trace_clk_set_parent_complete(core, parent); 1206 1207 if (ret) { 1208 flags = clk_enable_lock(); 1209 clk_reparent(core, old_parent); 1210 clk_enable_unlock(flags); 1211 __clk_set_parent_after(core, old_parent, parent); 1212 1213 return ret; 1214 } 1215 1216 __clk_set_parent_after(core, parent, old_parent); 1217 1218 return 0; 1219 } 1220 1221 /** 1222 * __clk_speculate_rates 1223 * @core: first clk in the subtree 1224 * @parent_rate: the "future" rate of clk's parent 1225 * 1226 * Walks the subtree of clks starting with clk, speculating rates as it 1227 * goes and firing off PRE_RATE_CHANGE notifications as necessary. 1228 * 1229 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending 1230 * pre-rate change notifications and returns early if no clks in the 1231 * subtree have subscribed to the notifications. Note that if a clk does not 1232 * implement the .recalc_rate callback then it is assumed that the clock will 1233 * take on the rate of its parent. 1234 */ 1235 static int __clk_speculate_rates(struct clk_core *core, 1236 unsigned long parent_rate) 1237 { 1238 struct clk_core *child; 1239 unsigned long new_rate; 1240 int ret = NOTIFY_DONE; 1241 1242 lockdep_assert_held(&prepare_lock); 1243 1244 new_rate = clk_recalc(core, parent_rate); 1245 1246 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ 1247 if (core->notifier_count) 1248 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); 1249 1250 if (ret & NOTIFY_STOP_MASK) { 1251 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", 1252 __func__, core->name, ret); 1253 goto out; 1254 } 1255 1256 hlist_for_each_entry(child, &core->children, child_node) { 1257 ret = __clk_speculate_rates(child, new_rate); 1258 if (ret & NOTIFY_STOP_MASK) 1259 break; 1260 } 1261 1262 out: 1263 return ret; 1264 } 1265 1266 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, 1267 struct clk_core *new_parent, u8 p_index) 1268 { 1269 struct clk_core *child; 1270 1271 core->new_rate = new_rate; 1272 core->new_parent = new_parent; 1273 core->new_parent_index = p_index; 1274 /* include clk in new parent's PRE_RATE_CHANGE notifications */ 1275 core->new_child = NULL; 1276 if (new_parent && new_parent != core->parent) 1277 new_parent->new_child = core; 1278 1279 hlist_for_each_entry(child, &core->children, child_node) { 1280 child->new_rate = clk_recalc(child, new_rate); 1281 clk_calc_subtree(child, child->new_rate, NULL, 0); 1282 } 1283 } 1284 1285 /* 1286 * calculate the new rates returning the topmost clock that has to be 1287 * changed. 
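 *
 * A worked illustration (hypothetical clocks and rates, not taken from this
 * file): with a chain pll -> div -> uart where "div" and "uart" both set
 * CLK_SET_RATE_PARENT, a clk_set_rate() request on "uart" may walk up the
 * chain and return "pll" as the topmost clock whose rate must change; the
 * new rates of "div" and "uart" are then filled in by clk_calc_subtree().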
1288 */ 1289 static struct clk_core *clk_calc_new_rates(struct clk_core *core, 1290 unsigned long rate) 1291 { 1292 struct clk_core *top = core; 1293 struct clk_core *old_parent, *parent; 1294 unsigned long best_parent_rate = 0; 1295 unsigned long new_rate; 1296 unsigned long min_rate; 1297 unsigned long max_rate; 1298 int p_index = 0; 1299 long ret; 1300 1301 /* sanity */ 1302 if (IS_ERR_OR_NULL(core)) 1303 return NULL; 1304 1305 /* save parent rate, if it exists */ 1306 parent = old_parent = core->parent; 1307 if (parent) 1308 best_parent_rate = parent->rate; 1309 1310 clk_core_get_boundaries(core, &min_rate, &max_rate); 1311 1312 /* find the closest rate and parent clk/rate */ 1313 if (core->ops->determine_rate) { 1314 struct clk_rate_request req; 1315 1316 req.rate = rate; 1317 req.min_rate = min_rate; 1318 req.max_rate = max_rate; 1319 if (parent) { 1320 req.best_parent_hw = parent->hw; 1321 req.best_parent_rate = parent->rate; 1322 } else { 1323 req.best_parent_hw = NULL; 1324 req.best_parent_rate = 0; 1325 } 1326 1327 ret = core->ops->determine_rate(core->hw, &req); 1328 if (ret < 0) 1329 return NULL; 1330 1331 best_parent_rate = req.best_parent_rate; 1332 new_rate = req.rate; 1333 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL; 1334 } else if (core->ops->round_rate) { 1335 ret = core->ops->round_rate(core->hw, rate, 1336 &best_parent_rate); 1337 if (ret < 0) 1338 return NULL; 1339 1340 new_rate = ret; 1341 if (new_rate < min_rate || new_rate > max_rate) 1342 return NULL; 1343 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { 1344 /* pass-through clock without adjustable parent */ 1345 core->new_rate = core->rate; 1346 return NULL; 1347 } else { 1348 /* pass-through clock with adjustable parent */ 1349 top = clk_calc_new_rates(parent, rate); 1350 new_rate = parent->new_rate; 1351 goto out; 1352 } 1353 1354 /* some clocks must be gated to change parent */ 1355 if (parent != old_parent && 1356 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { 1357 pr_debug("%s: %s not gated but wants to reparent\n", 1358 __func__, core->name); 1359 return NULL; 1360 } 1361 1362 /* try finding the new parent index */ 1363 if (parent && core->num_parents > 1) { 1364 p_index = clk_fetch_parent_index(core, parent); 1365 if (p_index < 0) { 1366 pr_debug("%s: clk %s can not be parent of clk %s\n", 1367 __func__, parent->name, core->name); 1368 return NULL; 1369 } 1370 } 1371 1372 if ((core->flags & CLK_SET_RATE_PARENT) && parent && 1373 best_parent_rate != parent->rate) 1374 top = clk_calc_new_rates(parent, best_parent_rate); 1375 1376 out: 1377 clk_calc_subtree(core, new_rate, parent, p_index); 1378 1379 return top; 1380 } 1381 1382 /* 1383 * Notify about rate changes in a subtree. Always walk down the whole tree 1384 * so that in case of an error we can walk down the whole tree again and 1385 * abort the change. 
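 *
 * A consumer can veto the change from a PRE_RATE_CHANGE handler registered
 * with clk_notifier_register(); a hedged sketch (not code from this file;
 * FOO_MAX_HZ is hypothetical):
 *
 *	if (event == PRE_RATE_CHANGE && cnd->new_rate > FOO_MAX_HZ)
 *		return notifier_from_errno(-EINVAL);
 *
 * which makes this walk report the offending clock so the whole change can
 * then be unwound with ABORT_RATE_CHANGE.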
1386 */ 1387 static struct clk_core *clk_propagate_rate_change(struct clk_core *core, 1388 unsigned long event) 1389 { 1390 struct clk_core *child, *tmp_clk, *fail_clk = NULL; 1391 int ret = NOTIFY_DONE; 1392 1393 if (core->rate == core->new_rate) 1394 return NULL; 1395 1396 if (core->notifier_count) { 1397 ret = __clk_notify(core, event, core->rate, core->new_rate); 1398 if (ret & NOTIFY_STOP_MASK) 1399 fail_clk = core; 1400 } 1401 1402 hlist_for_each_entry(child, &core->children, child_node) { 1403 /* Skip children who will be reparented to another clock */ 1404 if (child->new_parent && child->new_parent != core) 1405 continue; 1406 tmp_clk = clk_propagate_rate_change(child, event); 1407 if (tmp_clk) 1408 fail_clk = tmp_clk; 1409 } 1410 1411 /* handle the new child who might not be in core->children yet */ 1412 if (core->new_child) { 1413 tmp_clk = clk_propagate_rate_change(core->new_child, event); 1414 if (tmp_clk) 1415 fail_clk = tmp_clk; 1416 } 1417 1418 return fail_clk; 1419 } 1420 1421 /* 1422 * walk down a subtree and set the new rates notifying the rate 1423 * change on the way 1424 */ 1425 static void clk_change_rate(struct clk_core *core) 1426 { 1427 struct clk_core *child; 1428 struct hlist_node *tmp; 1429 unsigned long old_rate; 1430 unsigned long best_parent_rate = 0; 1431 bool skip_set_rate = false; 1432 struct clk_core *old_parent; 1433 1434 old_rate = core->rate; 1435 1436 if (core->new_parent) 1437 best_parent_rate = core->new_parent->rate; 1438 else if (core->parent) 1439 best_parent_rate = core->parent->rate; 1440 1441 if (core->new_parent && core->new_parent != core->parent) { 1442 old_parent = __clk_set_parent_before(core, core->new_parent); 1443 trace_clk_set_parent(core, core->new_parent); 1444 1445 if (core->ops->set_rate_and_parent) { 1446 skip_set_rate = true; 1447 core->ops->set_rate_and_parent(core->hw, core->new_rate, 1448 best_parent_rate, 1449 core->new_parent_index); 1450 } else if (core->ops->set_parent) { 1451 core->ops->set_parent(core->hw, core->new_parent_index); 1452 } 1453 1454 trace_clk_set_parent_complete(core, core->new_parent); 1455 __clk_set_parent_after(core, core->new_parent, old_parent); 1456 } 1457 1458 trace_clk_set_rate(core, core->new_rate); 1459 1460 if (!skip_set_rate && core->ops->set_rate) 1461 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); 1462 1463 trace_clk_set_rate_complete(core, core->new_rate); 1464 1465 core->rate = clk_recalc(core, best_parent_rate); 1466 1467 if (core->notifier_count && old_rate != core->rate) 1468 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); 1469 1470 if (core->flags & CLK_RECALC_NEW_RATES) 1471 (void)clk_calc_new_rates(core, core->new_rate); 1472 1473 /* 1474 * Use safe iteration, as change_rate can actually swap parents 1475 * for certain clock types. 
1476 */ 1477 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { 1478 /* Skip children who will be reparented to another clock */ 1479 if (child->new_parent && child->new_parent != core) 1480 continue; 1481 clk_change_rate(child); 1482 } 1483 1484 /* handle the new child who might not be in core->children yet */ 1485 if (core->new_child) 1486 clk_change_rate(core->new_child); 1487 } 1488 1489 static int clk_core_set_rate_nolock(struct clk_core *core, 1490 unsigned long req_rate) 1491 { 1492 struct clk_core *top, *fail_clk; 1493 unsigned long rate = req_rate; 1494 int ret = 0; 1495 1496 if (!core) 1497 return 0; 1498 1499 /* bail early if nothing to do */ 1500 if (rate == clk_core_get_rate_nolock(core)) 1501 return 0; 1502 1503 if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count) 1504 return -EBUSY; 1505 1506 /* calculate new rates and get the topmost changed clock */ 1507 top = clk_calc_new_rates(core, rate); 1508 if (!top) 1509 return -EINVAL; 1510 1511 /* notify that we are about to change rates */ 1512 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); 1513 if (fail_clk) { 1514 pr_debug("%s: failed to set %s rate\n", __func__, 1515 fail_clk->name); 1516 clk_propagate_rate_change(top, ABORT_RATE_CHANGE); 1517 return -EBUSY; 1518 } 1519 1520 /* change the rates */ 1521 clk_change_rate(top); 1522 1523 core->req_rate = req_rate; 1524 1525 return ret; 1526 } 1527 1528 /** 1529 * clk_set_rate - specify a new rate for clk 1530 * @clk: the clk whose rate is being changed 1531 * @rate: the new rate for clk 1532 * 1533 * In the simplest case clk_set_rate will only adjust the rate of clk. 1534 * 1535 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to 1536 * propagate up to clk's parent; whether or not this happens depends on the 1537 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged 1538 * after calling .round_rate then upstream parent propagation is ignored. If 1539 * *parent_rate comes back with a new rate for clk's parent then we propagate 1540 * up to clk's parent and set its rate. Upward propagation will continue 1541 * until either a clk does not support the CLK_SET_RATE_PARENT flag or 1542 * .round_rate stops requesting changes to clk's parent_rate. 1543 * 1544 * Rate changes are accomplished via tree traversal that also recalculates the 1545 * rates for the clocks and fires off POST_RATE_CHANGE notifiers. 1546 * 1547 * Returns 0 on success, -EERROR otherwise. 1548 */ 1549 int clk_set_rate(struct clk *clk, unsigned long rate) 1550 { 1551 int ret; 1552 1553 if (!clk) 1554 return 0; 1555 1556 /* prevent racing with updates to the clock topology */ 1557 clk_prepare_lock(); 1558 1559 ret = clk_core_set_rate_nolock(clk->core, rate); 1560 1561 clk_prepare_unlock(); 1562 1563 return ret; 1564 } 1565 EXPORT_SYMBOL_GPL(clk_set_rate); 1566 1567 /** 1568 * clk_set_rate_range - set a rate range for a clock source 1569 * @clk: clock source 1570 * @min: desired minimum clock rate in Hz, inclusive 1571 * @max: desired maximum clock rate in Hz, inclusive 1572 * 1573 * Returns success (0) or negative errno. 
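 *
 * A hedged usage sketch (not code from this file; the 100 MHz - 400 MHz
 * window is hypothetical):
 *
 *	ret = clk_set_rate_range(clk, 100000000, 400000000);
 *	if (ret)
 *		return ret;
 *
 * After this call the framework re-evaluates the clock rate against the
 * combined boundaries requested by all of its consumers.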
1574 */ 1575 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) 1576 { 1577 int ret = 0; 1578 1579 if (!clk) 1580 return 0; 1581 1582 if (min > max) { 1583 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n", 1584 __func__, clk->core->name, clk->dev_id, clk->con_id, 1585 min, max); 1586 return -EINVAL; 1587 } 1588 1589 clk_prepare_lock(); 1590 1591 if (min != clk->min_rate || max != clk->max_rate) { 1592 clk->min_rate = min; 1593 clk->max_rate = max; 1594 ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate); 1595 } 1596 1597 clk_prepare_unlock(); 1598 1599 return ret; 1600 } 1601 EXPORT_SYMBOL_GPL(clk_set_rate_range); 1602 1603 /** 1604 * clk_set_min_rate - set a minimum clock rate for a clock source 1605 * @clk: clock source 1606 * @rate: desired minimum clock rate in Hz, inclusive 1607 * 1608 * Returns success (0) or negative errno. 1609 */ 1610 int clk_set_min_rate(struct clk *clk, unsigned long rate) 1611 { 1612 if (!clk) 1613 return 0; 1614 1615 return clk_set_rate_range(clk, rate, clk->max_rate); 1616 } 1617 EXPORT_SYMBOL_GPL(clk_set_min_rate); 1618 1619 /** 1620 * clk_set_max_rate - set a maximum clock rate for a clock source 1621 * @clk: clock source 1622 * @rate: desired maximum clock rate in Hz, inclusive 1623 * 1624 * Returns success (0) or negative errno. 1625 */ 1626 int clk_set_max_rate(struct clk *clk, unsigned long rate) 1627 { 1628 if (!clk) 1629 return 0; 1630 1631 return clk_set_rate_range(clk, clk->min_rate, rate); 1632 } 1633 EXPORT_SYMBOL_GPL(clk_set_max_rate); 1634 1635 /** 1636 * clk_get_parent - return the parent of a clk 1637 * @clk: the clk whose parent gets returned 1638 * 1639 * Simply returns clk->parent. Returns NULL if clk is NULL. 1640 */ 1641 struct clk *clk_get_parent(struct clk *clk) 1642 { 1643 struct clk *parent; 1644 1645 if (!clk) 1646 return NULL; 1647 1648 clk_prepare_lock(); 1649 /* TODO: Create a per-user clk and change callers to call clk_put */ 1650 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; 1651 clk_prepare_unlock(); 1652 1653 return parent; 1654 } 1655 EXPORT_SYMBOL_GPL(clk_get_parent); 1656 1657 /* 1658 * .get_parent is mandatory for clocks with multiple possible parents. It is 1659 * optional for single-parent clocks. Always call .get_parent if it is 1660 * available and WARN if it is missing for multi-parent clocks. 1661 * 1662 * For single-parent clocks without .get_parent, first check to see if the 1663 * .parents array exists, and if so use it to avoid an expensive tree 1664 * traversal. If .parents does not exist then walk the tree. 1665 */ 1666 static struct clk_core *__clk_init_parent(struct clk_core *core) 1667 { 1668 struct clk_core *ret = NULL; 1669 u8 index; 1670 1671 /* handle the trivial cases */ 1672 1673 if (!core->num_parents) 1674 goto out; 1675 1676 if (core->num_parents == 1) { 1677 if (IS_ERR_OR_NULL(core->parent)) 1678 core->parent = clk_core_lookup(core->parent_names[0]); 1679 ret = core->parent; 1680 goto out; 1681 } 1682 1683 if (!core->ops->get_parent) { 1684 WARN(!core->ops->get_parent, 1685 "%s: multi-parent clocks must implement .get_parent\n", 1686 __func__); 1687 goto out; 1688 }; 1689 1690 /* 1691 * Do our best to cache parent clocks in core->parents. This prevents 1692 * unnecessary and expensive lookups. We don't set core->parent here; 1693 * that is done by the calling function. 
 */

	index = core->ops->get_parent(core->hw);

	if (!core->parents)
		core->parents =
			kcalloc(core->num_parents, sizeof(struct clk *),
				GFP_KERNEL);

	ret = clk_core_get_parent_by_index(core, index);

out:
	return ret;
}

static void clk_core_reparent(struct clk_core *core,
			      struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}

void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
{
	if (!hw)
		return;

	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
}

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	struct clk_core *core, *parent_core;
	unsigned int i;

	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)
		return true;

	for (i = 0; i < core->num_parents; i++)
		if (strcmp(core->parent_names[i], parent_core->name) == 0)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);

static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!core)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (core->parent == parent)
		goto out;

	/* verify ops for multi-parent clks */
	if ((core->num_parents > 1) && (!core->ops->set_parent)) {
		ret = -ENOSYS;
		goto out;
	}

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		p_rate = parent->rate;
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			ret = p_index;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

out:
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
Re-parent clk to use parent as its new input source. If clk is in 1828 * prepared state, the clk will get enabled for the duration of this call. If 1829 * that's not acceptable for a specific clk (Eg: the consumer can't handle 1830 * that, the reparenting is glitchy in hardware, etc), use the 1831 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. 1832 * 1833 * After successfully changing clk's parent clk_set_parent will update the 1834 * clk topology, sysfs topology and propagate rate recalculation via 1835 * __clk_recalc_rates. 1836 * 1837 * Returns 0 on success, -EERROR otherwise. 1838 */ 1839 int clk_set_parent(struct clk *clk, struct clk *parent) 1840 { 1841 if (!clk) 1842 return 0; 1843 1844 return clk_core_set_parent(clk->core, parent ? parent->core : NULL); 1845 } 1846 EXPORT_SYMBOL_GPL(clk_set_parent); 1847 1848 /** 1849 * clk_set_phase - adjust the phase shift of a clock signal 1850 * @clk: clock signal source 1851 * @degrees: number of degrees the signal is shifted 1852 * 1853 * Shifts the phase of a clock signal by the specified 1854 * degrees. Returns 0 on success, -EERROR otherwise. 1855 * 1856 * This function makes no distinction about the input or reference 1857 * signal that we adjust the clock signal phase against. For example 1858 * phase locked-loop clock signal generators we may shift phase with 1859 * respect to feedback clock signal input, but for other cases the 1860 * clock phase may be shifted with respect to some other, unspecified 1861 * signal. 1862 * 1863 * Additionally the concept of phase shift does not propagate through 1864 * the clock tree hierarchy, which sets it apart from clock rates and 1865 * clock accuracy. A parent clock phase attribute does not have an 1866 * impact on the phase attribute of a child clock. 1867 */ 1868 int clk_set_phase(struct clk *clk, int degrees) 1869 { 1870 int ret = -EINVAL; 1871 1872 if (!clk) 1873 return 0; 1874 1875 /* sanity check degrees */ 1876 degrees %= 360; 1877 if (degrees < 0) 1878 degrees += 360; 1879 1880 clk_prepare_lock(); 1881 1882 trace_clk_set_phase(clk->core, degrees); 1883 1884 if (clk->core->ops->set_phase) 1885 ret = clk->core->ops->set_phase(clk->core->hw, degrees); 1886 1887 trace_clk_set_phase_complete(clk->core, degrees); 1888 1889 if (!ret) 1890 clk->core->phase = degrees; 1891 1892 clk_prepare_unlock(); 1893 1894 return ret; 1895 } 1896 EXPORT_SYMBOL_GPL(clk_set_phase); 1897 1898 static int clk_core_get_phase(struct clk_core *core) 1899 { 1900 int ret; 1901 1902 clk_prepare_lock(); 1903 ret = core->phase; 1904 clk_prepare_unlock(); 1905 1906 return ret; 1907 } 1908 1909 /** 1910 * clk_get_phase - return the phase shift of a clock signal 1911 * @clk: clock signal source 1912 * 1913 * Returns the phase shift of a clock node in degrees, otherwise returns 1914 * -EERROR. 1915 */ 1916 int clk_get_phase(struct clk *clk) 1917 { 1918 if (!clk) 1919 return 0; 1920 1921 return clk_core_get_phase(clk->core); 1922 } 1923 EXPORT_SYMBOL_GPL(clk_get_phase); 1924 1925 /** 1926 * clk_is_match - check if two clk's point to the same hardware clock 1927 * @p: clk compared against q 1928 * @q: clk compared against p 1929 * 1930 * Returns true if the two struct clk pointers both point to the same hardware 1931 * clock node. Put differently, returns true if struct clk *p and struct clk *q 1932 * share the same struct clk_core object. 1933 * 1934 * Returns false otherwise. Note that two NULL clks are treated as matching. 
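 *
 * A hedged usage sketch (not code from this file; the connection ids are
 * hypothetical):
 *
 *	struct clk *a = devm_clk_get(dev, "baud");
 *	struct clk *b = devm_clk_get(dev, "register");
 *
 *	if (clk_is_match(a, b))
 *		dev_dbg(dev, "both ids map to the same hardware clock\n");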
1935 */ 1936 bool clk_is_match(const struct clk *p, const struct clk *q) 1937 { 1938 /* trivial case: identical struct clk's or both NULL */ 1939 if (p == q) 1940 return true; 1941 1942 /* true if clk->core pointers match. Avoid derefing garbage */ 1943 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) 1944 if (p->core == q->core) 1945 return true; 1946 1947 return false; 1948 } 1949 EXPORT_SYMBOL_GPL(clk_is_match); 1950 1951 /*** debugfs support ***/ 1952 1953 #ifdef CONFIG_DEBUG_FS 1954 #include <linux/debugfs.h> 1955 1956 static struct dentry *rootdir; 1957 static int inited = 0; 1958 static DEFINE_MUTEX(clk_debug_lock); 1959 static HLIST_HEAD(clk_debug_list); 1960 1961 static struct hlist_head *all_lists[] = { 1962 &clk_root_list, 1963 &clk_orphan_list, 1964 NULL, 1965 }; 1966 1967 static struct hlist_head *orphan_list[] = { 1968 &clk_orphan_list, 1969 NULL, 1970 }; 1971 1972 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, 1973 int level) 1974 { 1975 if (!c) 1976 return; 1977 1978 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", 1979 level * 3 + 1, "", 1980 30 - level * 3, c->name, 1981 c->enable_count, c->prepare_count, clk_core_get_rate(c), 1982 clk_core_get_accuracy(c), clk_core_get_phase(c)); 1983 } 1984 1985 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, 1986 int level) 1987 { 1988 struct clk_core *child; 1989 1990 if (!c) 1991 return; 1992 1993 clk_summary_show_one(s, c, level); 1994 1995 hlist_for_each_entry(child, &c->children, child_node) 1996 clk_summary_show_subtree(s, child, level + 1); 1997 } 1998 1999 static int clk_summary_show(struct seq_file *s, void *data) 2000 { 2001 struct clk_core *c; 2002 struct hlist_head **lists = (struct hlist_head **)s->private; 2003 2004 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); 2005 seq_puts(s, "----------------------------------------------------------------------------------------\n"); 2006 2007 clk_prepare_lock(); 2008 2009 for (; *lists; lists++) 2010 hlist_for_each_entry(c, *lists, child_node) 2011 clk_summary_show_subtree(s, c, 0); 2012 2013 clk_prepare_unlock(); 2014 2015 return 0; 2016 } 2017 2018 2019 static int clk_summary_open(struct inode *inode, struct file *file) 2020 { 2021 return single_open(file, clk_summary_show, inode->i_private); 2022 } 2023 2024 static const struct file_operations clk_summary_fops = { 2025 .open = clk_summary_open, 2026 .read = seq_read, 2027 .llseek = seq_lseek, 2028 .release = single_release, 2029 }; 2030 2031 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) 2032 { 2033 if (!c) 2034 return; 2035 2036 /* This should be JSON format, i.e. 
elements separated with a comma */ 2037 seq_printf(s, "\"%s\": { ", c->name); 2038 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 2039 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 2040 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); 2041 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); 2042 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); 2043 } 2044 2045 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) 2046 { 2047 struct clk_core *child; 2048 2049 if (!c) 2050 return; 2051 2052 clk_dump_one(s, c, level); 2053 2054 hlist_for_each_entry(child, &c->children, child_node) { 2055 seq_printf(s, ","); 2056 clk_dump_subtree(s, child, level + 1); 2057 } 2058 2059 seq_printf(s, "}"); 2060 } 2061 2062 static int clk_dump(struct seq_file *s, void *data) 2063 { 2064 struct clk_core *c; 2065 bool first_node = true; 2066 struct hlist_head **lists = (struct hlist_head **)s->private; 2067 2068 seq_printf(s, "{"); 2069 2070 clk_prepare_lock(); 2071 2072 for (; *lists; lists++) { 2073 hlist_for_each_entry(c, *lists, child_node) { 2074 if (!first_node) 2075 seq_puts(s, ","); 2076 first_node = false; 2077 clk_dump_subtree(s, c, 0); 2078 } 2079 } 2080 2081 clk_prepare_unlock(); 2082 2083 seq_puts(s, "}\n"); 2084 return 0; 2085 } 2086 2087 2088 static int clk_dump_open(struct inode *inode, struct file *file) 2089 { 2090 return single_open(file, clk_dump, inode->i_private); 2091 } 2092 2093 static const struct file_operations clk_dump_fops = { 2094 .open = clk_dump_open, 2095 .read = seq_read, 2096 .llseek = seq_lseek, 2097 .release = single_release, 2098 }; 2099 2100 static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) 2101 { 2102 struct dentry *d; 2103 int ret = -ENOMEM; 2104 2105 if (!core || !pdentry) { 2106 ret = -EINVAL; 2107 goto out; 2108 } 2109 2110 d = debugfs_create_dir(core->name, pdentry); 2111 if (!d) 2112 goto out; 2113 2114 core->dentry = d; 2115 2116 d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry, 2117 (u32 *)&core->rate); 2118 if (!d) 2119 goto err_out; 2120 2121 d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry, 2122 (u32 *)&core->accuracy); 2123 if (!d) 2124 goto err_out; 2125 2126 d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry, 2127 (u32 *)&core->phase); 2128 if (!d) 2129 goto err_out; 2130 2131 d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry, 2132 (u32 *)&core->flags); 2133 if (!d) 2134 goto err_out; 2135 2136 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry, 2137 (u32 *)&core->prepare_count); 2138 if (!d) 2139 goto err_out; 2140 2141 d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry, 2142 (u32 *)&core->enable_count); 2143 if (!d) 2144 goto err_out; 2145 2146 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry, 2147 (u32 *)&core->notifier_count); 2148 if (!d) 2149 goto err_out; 2150 2151 if (core->ops->debug_init) { 2152 ret = core->ops->debug_init(core->hw, core->dentry); 2153 if (ret) 2154 goto err_out; 2155 } 2156 2157 ret = 0; 2158 goto out; 2159 2160 err_out: 2161 debugfs_remove_recursive(core->dentry); 2162 core->dentry = NULL; 2163 out: 2164 return ret; 2165 } 2166 2167 /** 2168 * clk_debug_register - add a clk node to the debugfs clk directory 2169 * @core: the clk being added to the debugfs clk directory 2170 * 2171 * Dynamically adds a clk to the debugfs clk directory if debugfs has been 2172 * initialized. 
Otherwise it bails out early since the debugfs clk directory 2173 * will be created lazily by clk_debug_init as part of a late_initcall. 2174 */ 2175 static int clk_debug_register(struct clk_core *core) 2176 { 2177 int ret = 0; 2178 2179 mutex_lock(&clk_debug_lock); 2180 hlist_add_head(&core->debug_node, &clk_debug_list); 2181 2182 if (!inited) 2183 goto unlock; 2184 2185 ret = clk_debug_create_one(core, rootdir); 2186 unlock: 2187 mutex_unlock(&clk_debug_lock); 2188 2189 return ret; 2190 } 2191 2192 /** 2193 * clk_debug_unregister - remove a clk node from the debugfs clk directory 2194 * @core: the clk being removed from the debugfs clk directory 2195 * 2196 * Dynamically removes a clk and all its child nodes from the 2197 * debugfs clk directory if clk->dentry points to debugfs created by 2198 * clk_debug_register in __clk_init. 2199 */ 2200 static void clk_debug_unregister(struct clk_core *core) 2201 { 2202 mutex_lock(&clk_debug_lock); 2203 hlist_del_init(&core->debug_node); 2204 debugfs_remove_recursive(core->dentry); 2205 core->dentry = NULL; 2206 mutex_unlock(&clk_debug_lock); 2207 } 2208 2209 struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode, 2210 void *data, const struct file_operations *fops) 2211 { 2212 struct dentry *d = NULL; 2213 2214 if (hw->core->dentry) 2215 d = debugfs_create_file(name, mode, hw->core->dentry, data, 2216 fops); 2217 2218 return d; 2219 } 2220 EXPORT_SYMBOL_GPL(clk_debugfs_add_file); 2221 2222 /** 2223 * clk_debug_init - lazily populate the debugfs clk directory 2224 * 2225 * clks are often initialized very early during boot before memory can be 2226 * dynamically allocated and well before debugfs is setup. This function 2227 * populates the debugfs clk directory once at boot-time when we know that 2228 * debugfs is setup. It should only be called once at boot-time, all other clks 2229 * added dynamically will be done so with clk_debug_register. 
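 *
 * Once it has run, the debugfs tree contains a per-clock directory for each
 * registered clk as well as the clk_summary, clk_dump, clk_orphan_summary
 * and clk_orphan_dump files directly under the "clk" root directory
 * (typically /sys/kernel/debug/clk/ when debugfs is mounted in its usual
 * location).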
2230 */ 2231 static int __init clk_debug_init(void) 2232 { 2233 struct clk_core *core; 2234 struct dentry *d; 2235 2236 rootdir = debugfs_create_dir("clk", NULL); 2237 2238 if (!rootdir) 2239 return -ENOMEM; 2240 2241 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists, 2242 &clk_summary_fops); 2243 if (!d) 2244 return -ENOMEM; 2245 2246 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists, 2247 &clk_dump_fops); 2248 if (!d) 2249 return -ENOMEM; 2250 2251 d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir, 2252 &orphan_list, &clk_summary_fops); 2253 if (!d) 2254 return -ENOMEM; 2255 2256 d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir, 2257 &orphan_list, &clk_dump_fops); 2258 if (!d) 2259 return -ENOMEM; 2260 2261 mutex_lock(&clk_debug_lock); 2262 hlist_for_each_entry(core, &clk_debug_list, debug_node) 2263 clk_debug_create_one(core, rootdir); 2264 2265 inited = 1; 2266 mutex_unlock(&clk_debug_lock); 2267 2268 return 0; 2269 } 2270 late_initcall(clk_debug_init); 2271 #else 2272 static inline int clk_debug_register(struct clk_core *core) { return 0; } 2273 static inline void clk_debug_reparent(struct clk_core *core, 2274 struct clk_core *new_parent) 2275 { 2276 } 2277 static inline void clk_debug_unregister(struct clk_core *core) 2278 { 2279 } 2280 #endif 2281 2282 /** 2283 * __clk_init - initialize the data structures in a struct clk 2284 * @dev: device initializing this clk, placeholder for now 2285 * @clk: clk being initialized 2286 * 2287 * Initializes the lists in struct clk_core, queries the hardware for the 2288 * parent and rate and sets them both. 2289 */ 2290 static int __clk_init(struct device *dev, struct clk *clk_user) 2291 { 2292 int i, ret = 0; 2293 struct clk_core *orphan; 2294 struct hlist_node *tmp2; 2295 struct clk_core *core; 2296 unsigned long rate; 2297 2298 if (!clk_user) 2299 return -EINVAL; 2300 2301 core = clk_user->core; 2302 2303 clk_prepare_lock(); 2304 2305 /* check to see if a clock with this name is already registered */ 2306 if (clk_core_lookup(core->name)) { 2307 pr_debug("%s: clk %s already initialized\n", 2308 __func__, core->name); 2309 ret = -EEXIST; 2310 goto out; 2311 } 2312 2313 /* check that clk_ops are sane. See Documentation/clk.txt */ 2314 if (core->ops->set_rate && 2315 !((core->ops->round_rate || core->ops->determine_rate) && 2316 core->ops->recalc_rate)) { 2317 pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", 2318 __func__, core->name); 2319 ret = -EINVAL; 2320 goto out; 2321 } 2322 2323 if (core->ops->set_parent && !core->ops->get_parent) { 2324 pr_warning("%s: %s must implement .get_parent & .set_parent\n", 2325 __func__, core->name); 2326 ret = -EINVAL; 2327 goto out; 2328 } 2329 2330 if (core->ops->set_rate_and_parent && 2331 !(core->ops->set_parent && core->ops->set_rate)) { 2332 pr_warn("%s: %s must implement .set_parent & .set_rate\n", 2333 __func__, core->name); 2334 ret = -EINVAL; 2335 goto out; 2336 } 2337 2338 /* throw a WARN if any entries in parent_names are NULL */ 2339 for (i = 0; i < core->num_parents; i++) 2340 WARN(!core->parent_names[i], 2341 "%s: invalid NULL in %s's .parent_names\n", 2342 __func__, core->name); 2343 2344 /* 2345 * Allocate an array of struct clk *'s to avoid unnecessary string 2346 * look-ups of clk's possible parents. 
This can fail for clocks passed 2347 * in to clk_init during early boot; thus any access to core->parents[] 2348 * must always check for a NULL pointer and try to populate it if 2349 * necessary. 2350 * 2351 * If core->parents is not NULL we skip this entire block. This allows 2352 * for clock drivers to statically initialize core->parents. 2353 */ 2354 if (core->num_parents > 1 && !core->parents) { 2355 core->parents = kcalloc(core->num_parents, sizeof(struct clk *), 2356 GFP_KERNEL); 2357 /* 2358 * clk_core_lookup returns NULL for parents that have not been 2359 * clk_init'd; thus any access to clk->parents[] must check 2360 * for a NULL pointer. We can always perform lazy lookups for 2361 * missing parents later on. 2362 */ 2363 if (core->parents) 2364 for (i = 0; i < core->num_parents; i++) 2365 core->parents[i] = 2366 clk_core_lookup(core->parent_names[i]); 2367 } 2368 2369 core->parent = __clk_init_parent(core); 2370 2371 /* 2372 * Populate core->parent if parent has already been __clk_init'd. If 2373 * parent has not yet been __clk_init'd then place clk in the orphan 2374 * list. If clk has set the CLK_IS_ROOT flag then place it in the root 2375 * clk list. 2376 * 2377 * Every time a new clk is clk_init'd then we walk the list of orphan 2378 * clocks and re-parent any that are children of the clock currently 2379 * being clk_init'd. 2380 */ 2381 if (core->parent) { 2382 hlist_add_head(&core->child_node, 2383 &core->parent->children); 2384 core->orphan = core->parent->orphan; 2385 } else if (core->flags & CLK_IS_ROOT) { 2386 hlist_add_head(&core->child_node, &clk_root_list); 2387 core->orphan = false; 2388 } else { 2389 hlist_add_head(&core->child_node, &clk_orphan_list); 2390 core->orphan = true; 2391 } 2392 2393 /* 2394 * Set clk's accuracy. The preferred method is to use 2395 * .recalc_accuracy. For simple clocks and lazy developers the default 2396 * fallback is to use the parent's accuracy. If a clock doesn't have a 2397 * parent (or is orphaned) then accuracy is set to zero (perfect 2398 * clock). 2399 */ 2400 if (core->ops->recalc_accuracy) 2401 core->accuracy = core->ops->recalc_accuracy(core->hw, 2402 __clk_get_accuracy(core->parent)); 2403 else if (core->parent) 2404 core->accuracy = core->parent->accuracy; 2405 else 2406 core->accuracy = 0; 2407 2408 /* 2409 * Set clk's phase. 2410 * Since a phase is by definition relative to its parent, just 2411 * query the current clock phase, or just assume it's in phase. 2412 */ 2413 if (core->ops->get_phase) 2414 core->phase = core->ops->get_phase(core->hw); 2415 else 2416 core->phase = 0; 2417 2418 /* 2419 * Set clk's rate. The preferred method is to use .recalc_rate. For 2420 * simple clocks and lazy developers the default fallback is to use the 2421 * parent's rate. If a clock doesn't have a parent (or is orphaned) 2422 * then rate is set to zero. 
2423 */ 2424 if (core->ops->recalc_rate) 2425 rate = core->ops->recalc_rate(core->hw, 2426 clk_core_get_rate_nolock(core->parent)); 2427 else if (core->parent) 2428 rate = core->parent->rate; 2429 else 2430 rate = 0; 2431 core->rate = core->req_rate = rate; 2432 2433 /* 2434 * walk the list of orphan clocks and reparent any that are children of 2435 * this clock 2436 */ 2437 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 2438 if (orphan->num_parents && orphan->ops->get_parent) { 2439 i = orphan->ops->get_parent(orphan->hw); 2440 if (i >= 0 && i < orphan->num_parents && 2441 !strcmp(core->name, orphan->parent_names[i])) 2442 clk_core_reparent(orphan, core); 2443 continue; 2444 } 2445 2446 for (i = 0; i < orphan->num_parents; i++) 2447 if (!strcmp(core->name, orphan->parent_names[i])) { 2448 clk_core_reparent(orphan, core); 2449 break; 2450 } 2451 } 2452 2453 /* 2454 * optional platform-specific magic 2455 * 2456 * The .init callback is not used by any of the basic clock types, but 2457 * exists for weird hardware that must perform initialization magic. 2458 * Please consider other ways of solving initialization problems before 2459 * using this callback, as its use is discouraged. 2460 */ 2461 if (core->ops->init) 2462 core->ops->init(core->hw); 2463 2464 kref_init(&core->ref); 2465 out: 2466 clk_prepare_unlock(); 2467 2468 if (!ret) 2469 clk_debug_register(core); 2470 2471 return ret; 2472 } 2473 2474 struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, 2475 const char *con_id) 2476 { 2477 struct clk *clk; 2478 2479 /* This is to allow this function to be chained to others */ 2480 if (!hw || IS_ERR(hw)) 2481 return (struct clk *) hw; 2482 2483 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 2484 if (!clk) 2485 return ERR_PTR(-ENOMEM); 2486 2487 clk->core = hw->core; 2488 clk->dev_id = dev_id; 2489 clk->con_id = con_id; 2490 clk->max_rate = ULONG_MAX; 2491 2492 clk_prepare_lock(); 2493 hlist_add_head(&clk->clks_node, &hw->core->clks); 2494 clk_prepare_unlock(); 2495 2496 return clk; 2497 } 2498 2499 void __clk_free_clk(struct clk *clk) 2500 { 2501 clk_prepare_lock(); 2502 hlist_del(&clk->clks_node); 2503 clk_prepare_unlock(); 2504 2505 kfree(clk); 2506 } 2507 2508 /** 2509 * clk_register - allocate a new clock, register it and return an opaque cookie 2510 * @dev: device that is registering this clock 2511 * @hw: link to hardware-specific clock data 2512 * 2513 * clk_register is the primary interface for populating the clock tree with new 2514 * clock nodes. It returns a pointer to the newly allocated struct clk which 2515 * cannot be dereferenced by driver code but may be used in conjunction with the 2516 * rest of the clock API. In the event of an error clk_register will return an 2517 * error code; drivers must test for an error code after calling clk_register. 
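 *
 * A minimal registration sketch (illustrative only; foo_ops and the other
 * foo_* names are hypothetical and not defined in this file):
 *
 *	static const char *foo_parents[] = { "foo_parent" };
 *	static const struct clk_init_data foo_init = {
 *		.name		= "foo",
 *		.ops		= &foo_ops,
 *		.parent_names	= foo_parents,
 *		.num_parents	= ARRAY_SIZE(foo_parents),
 *	};
 *	static struct clk_hw foo_hw = { .init = &foo_init };
 *
 *	struct clk *clk = clk_register(dev, &foo_hw);
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);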
2518 */ 2519 struct clk *clk_register(struct device *dev, struct clk_hw *hw) 2520 { 2521 int i, ret; 2522 struct clk_core *core; 2523 2524 core = kzalloc(sizeof(*core), GFP_KERNEL); 2525 if (!core) { 2526 ret = -ENOMEM; 2527 goto fail_out; 2528 } 2529 2530 core->name = kstrdup_const(hw->init->name, GFP_KERNEL); 2531 if (!core->name) { 2532 ret = -ENOMEM; 2533 goto fail_name; 2534 } 2535 core->ops = hw->init->ops; 2536 if (dev && dev->driver) 2537 core->owner = dev->driver->owner; 2538 core->hw = hw; 2539 core->flags = hw->init->flags; 2540 core->num_parents = hw->init->num_parents; 2541 core->min_rate = 0; 2542 core->max_rate = ULONG_MAX; 2543 hw->core = core; 2544 2545 /* allocate local copy in case parent_names is __initdata */ 2546 core->parent_names = kcalloc(core->num_parents, sizeof(char *), 2547 GFP_KERNEL); 2548 2549 if (!core->parent_names) { 2550 ret = -ENOMEM; 2551 goto fail_parent_names; 2552 } 2553 2554 2555 /* copy each string name in case parent_names is __initdata */ 2556 for (i = 0; i < core->num_parents; i++) { 2557 core->parent_names[i] = kstrdup_const(hw->init->parent_names[i], 2558 GFP_KERNEL); 2559 if (!core->parent_names[i]) { 2560 ret = -ENOMEM; 2561 goto fail_parent_names_copy; 2562 } 2563 } 2564 2565 INIT_HLIST_HEAD(&core->clks); 2566 2567 hw->clk = __clk_create_clk(hw, NULL, NULL); 2568 if (IS_ERR(hw->clk)) { 2569 ret = PTR_ERR(hw->clk); 2570 goto fail_parent_names_copy; 2571 } 2572 2573 ret = __clk_init(dev, hw->clk); 2574 if (!ret) 2575 return hw->clk; 2576 2577 __clk_free_clk(hw->clk); 2578 hw->clk = NULL; 2579 2580 fail_parent_names_copy: 2581 while (--i >= 0) 2582 kfree_const(core->parent_names[i]); 2583 kfree(core->parent_names); 2584 fail_parent_names: 2585 kfree_const(core->name); 2586 fail_name: 2587 kfree(core); 2588 fail_out: 2589 return ERR_PTR(ret); 2590 } 2591 EXPORT_SYMBOL_GPL(clk_register); 2592 2593 /* Free memory allocated for a clock. */ 2594 static void __clk_release(struct kref *ref) 2595 { 2596 struct clk_core *core = container_of(ref, struct clk_core, ref); 2597 int i = core->num_parents; 2598 2599 lockdep_assert_held(&prepare_lock); 2600 2601 kfree(core->parents); 2602 while (--i >= 0) 2603 kfree_const(core->parent_names[i]); 2604 2605 kfree(core->parent_names); 2606 kfree_const(core->name); 2607 kfree(core); 2608 } 2609 2610 /* 2611 * Empty clk_ops for unregistered clocks. These are used temporarily 2612 * after clk_unregister() was called on a clock and until last clock 2613 * consumer calls clk_put() and the struct clk object is freed. 
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	if (clk->core->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		/* don't leak prepare_lock on this error path */
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent(child, NULL);
	}

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
			__func__, clk->core->name);
	kref_put(&clk->core->ref, __clk_release);

unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	/* the devres data is the slot that holds the struct clk pointer */
	struct clk *c = *(struct clk **)res;

	if (WARN_ON(!c))
		return 0;
	return c == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
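 *
 * For illustration (hypothetical driver code, not taken from this file), a
 * provider typically registers in its probe() and lets driver detach do the
 * cleanup:
 *
 *	clk = devm_clk_register(dev, &foo_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 * and only calls devm_clk_unregister() if the clock has to disappear before
 * the device is unbound.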
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/*
 * clkdev helpers
 */
int __clk_get(struct clk *clk)
{
	struct clk_core *core = !clk ? NULL : clk->core;

	if (core) {
		if (!try_module_get(core->owner))
			return 0;

		kref_get(&core->ref);
	}
	return 1;
}

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	kfree(clk);
}

/*** clk rate change notifiers ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes. This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon. The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would nest acquisition of the prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the
 * original clock rate is passed to the callback via struct
 * clk_notifier_data.old_rate and the new frequency is passed via struct
 * clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and free the memory
 * allocated in clk_notifier_register().
 *
 * Returns -EINVAL if called with null arguments, -ENOENT if no notifier was
 * registered for @clk; otherwise, passes along the return value of
 * srcu_notifier_chain_unregister().
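 *
 * A consumer normally pairs this with clk_notifier_register(). Illustrative
 * sketch only (the foo_* names and priv fields are hypothetical):
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("rate %lu -> %lu\n",
 *				 cnd->old_rate, cnd->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	priv->nb.notifier_call = foo_clk_notify;
 *	clk_notifier_register(priv->clk, &priv->nb);
 *	...
 *	clk_notifier_unregister(priv->clk, &priv->nb);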
2862 */ 2863 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) 2864 { 2865 struct clk_notifier *cn = NULL; 2866 int ret = -EINVAL; 2867 2868 if (!clk || !nb) 2869 return -EINVAL; 2870 2871 clk_prepare_lock(); 2872 2873 list_for_each_entry(cn, &clk_notifier_list, node) 2874 if (cn->clk == clk) 2875 break; 2876 2877 if (cn->clk == clk) { 2878 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 2879 2880 clk->core->notifier_count--; 2881 2882 /* XXX the notifier code should handle this better */ 2883 if (!cn->notifier_head.head) { 2884 srcu_cleanup_notifier_head(&cn->notifier_head); 2885 list_del(&cn->node); 2886 kfree(cn); 2887 } 2888 2889 } else { 2890 ret = -ENOENT; 2891 } 2892 2893 clk_prepare_unlock(); 2894 2895 return ret; 2896 } 2897 EXPORT_SYMBOL_GPL(clk_notifier_unregister); 2898 2899 #ifdef CONFIG_OF 2900 /** 2901 * struct of_clk_provider - Clock provider registration structure 2902 * @link: Entry in global list of clock providers 2903 * @node: Pointer to device tree node of clock provider 2904 * @get: Get clock callback. Returns NULL or a struct clk for the 2905 * given clock specifier 2906 * @data: context pointer to be passed into @get callback 2907 */ 2908 struct of_clk_provider { 2909 struct list_head link; 2910 2911 struct device_node *node; 2912 struct clk *(*get)(struct of_phandle_args *clkspec, void *data); 2913 void *data; 2914 }; 2915 2916 static const struct of_device_id __clk_of_table_sentinel 2917 __used __section(__clk_of_table_end); 2918 2919 static LIST_HEAD(of_clk_providers); 2920 static DEFINE_MUTEX(of_clk_mutex); 2921 2922 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 2923 void *data) 2924 { 2925 return data; 2926 } 2927 EXPORT_SYMBOL_GPL(of_clk_src_simple_get); 2928 2929 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) 2930 { 2931 struct clk_onecell_data *clk_data = data; 2932 unsigned int idx = clkspec->args[0]; 2933 2934 if (idx >= clk_data->clk_num) { 2935 pr_err("%s: invalid clock index %d\n", __func__, idx); 2936 return ERR_PTR(-EINVAL); 2937 } 2938 2939 return clk_data->clks[idx]; 2940 } 2941 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); 2942 2943 /** 2944 * of_clk_add_provider() - Register a clock provider for a node 2945 * @np: Device node pointer associated with clock provider 2946 * @clk_src_get: callback for decoding clock 2947 * @data: context pointer for @clk_src_get callback. 
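 *
 * Illustrative sketch for a provider with a single output clock (np, clk and
 * foo_hw are hypothetical locals of the caller):
 *
 *	clk = clk_register(dev, &foo_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	return of_clk_add_provider(np, of_clk_src_simple_get, clk);
 *
 * Providers with multiple outputs usually fill a struct clk_onecell_data
 * and register of_clk_src_onecell_get as the @clk_src_get callback instead.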
2948 */ 2949 int of_clk_add_provider(struct device_node *np, 2950 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, 2951 void *data), 2952 void *data) 2953 { 2954 struct of_clk_provider *cp; 2955 int ret; 2956 2957 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL); 2958 if (!cp) 2959 return -ENOMEM; 2960 2961 cp->node = of_node_get(np); 2962 cp->data = data; 2963 cp->get = clk_src_get; 2964 2965 mutex_lock(&of_clk_mutex); 2966 list_add(&cp->link, &of_clk_providers); 2967 mutex_unlock(&of_clk_mutex); 2968 pr_debug("Added clock from %s\n", np->full_name); 2969 2970 ret = of_clk_set_defaults(np, true); 2971 if (ret < 0) 2972 of_clk_del_provider(np); 2973 2974 return ret; 2975 } 2976 EXPORT_SYMBOL_GPL(of_clk_add_provider); 2977 2978 /** 2979 * of_clk_del_provider() - Remove a previously registered clock provider 2980 * @np: Device node pointer associated with clock provider 2981 */ 2982 void of_clk_del_provider(struct device_node *np) 2983 { 2984 struct of_clk_provider *cp; 2985 2986 mutex_lock(&of_clk_mutex); 2987 list_for_each_entry(cp, &of_clk_providers, link) { 2988 if (cp->node == np) { 2989 list_del(&cp->link); 2990 of_node_put(cp->node); 2991 kfree(cp); 2992 break; 2993 } 2994 } 2995 mutex_unlock(&of_clk_mutex); 2996 } 2997 EXPORT_SYMBOL_GPL(of_clk_del_provider); 2998 2999 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, 3000 const char *dev_id, const char *con_id) 3001 { 3002 struct of_clk_provider *provider; 3003 struct clk *clk = ERR_PTR(-EPROBE_DEFER); 3004 3005 if (!clkspec) 3006 return ERR_PTR(-EINVAL); 3007 3008 /* Check if we have such a provider in our array */ 3009 mutex_lock(&of_clk_mutex); 3010 list_for_each_entry(provider, &of_clk_providers, link) { 3011 if (provider->node == clkspec->np) 3012 clk = provider->get(clkspec, provider->data); 3013 if (!IS_ERR(clk)) { 3014 clk = __clk_create_clk(__clk_get_hw(clk), dev_id, 3015 con_id); 3016 3017 if (!IS_ERR(clk) && !__clk_get(clk)) { 3018 __clk_free_clk(clk); 3019 clk = ERR_PTR(-ENOENT); 3020 } 3021 3022 break; 3023 } 3024 } 3025 mutex_unlock(&of_clk_mutex); 3026 3027 return clk; 3028 } 3029 3030 /** 3031 * of_clk_get_from_provider() - Lookup a clock from a clock provider 3032 * @clkspec: pointer to a clock specifier data structure 3033 * 3034 * This function looks up a struct clk from the registered list of clock 3035 * providers, an input is a clock specifier data structure as returned 3036 * from the of_parse_phandle_with_args() function call. 3037 */ 3038 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 3039 { 3040 return __of_clk_get_from_provider(clkspec, NULL, __func__); 3041 } 3042 3043 int of_clk_get_parent_count(struct device_node *np) 3044 { 3045 return of_count_phandle_with_args(np, "clocks", "#clock-cells"); 3046 } 3047 EXPORT_SYMBOL_GPL(of_clk_get_parent_count); 3048 3049 const char *of_clk_get_parent_name(struct device_node *np, int index) 3050 { 3051 struct of_phandle_args clkspec; 3052 struct property *prop; 3053 const char *clk_name; 3054 const __be32 *vp; 3055 u32 pv; 3056 int rc; 3057 int count; 3058 3059 if (index < 0) 3060 return NULL; 3061 3062 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, 3063 &clkspec); 3064 if (rc) 3065 return NULL; 3066 3067 index = clkspec.args_count ? clkspec.args[0] : 0; 3068 count = 0; 3069 3070 /* if there is an indices property, use it to transfer the index 3071 * specified into an array offset for the clock-output-names property. 
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
 * number of parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that holds the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int i = 0;

	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
		i++;

	return i;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);

struct clock_provider {
	of_clk_init_cb_t clk_init_cb;
	struct device_node *np;
	struct list_head node;
};

/*
 * This function walks the parent clocks of a node and, for each one, checks
 * whether the provider for that parent has already been initialized; if it
 * has, the parent clock is ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we assume that the device tree is written
		 * correctly, so any other error means that there are
		 * no more parents. Since we did not exit earlier, the
		 * previous parents are all ready. If there is no clock
		 * parent at all, there is nothing to wait for and we
		 * can consider the node ready as well.
		 */
		return 1;
	}
}

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions, trying to follow the parent
 * dependencies so that each provider is initialized after its parents.
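 *
 * Early platform clock providers normally hook into this scan with
 * CLK_OF_DECLARE() from <linux/clk-provider.h>. Illustrative sketch only
 * (the foo names and "vendor,foo-clock" compatible are hypothetical):
 *
 *	static void __init foo_clk_setup(struct device_node *np)
 *	{
 *		... register clocks, then call of_clk_add_provider() ...
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_setup);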
3160 */ 3161 void __init of_clk_init(const struct of_device_id *matches) 3162 { 3163 const struct of_device_id *match; 3164 struct device_node *np; 3165 struct clock_provider *clk_provider, *next; 3166 bool is_init_done; 3167 bool force = false; 3168 LIST_HEAD(clk_provider_list); 3169 3170 if (!matches) 3171 matches = &__clk_of_table; 3172 3173 /* First prepare the list of the clocks providers */ 3174 for_each_matching_node_and_match(np, matches, &match) { 3175 struct clock_provider *parent; 3176 3177 parent = kzalloc(sizeof(*parent), GFP_KERNEL); 3178 if (!parent) { 3179 list_for_each_entry_safe(clk_provider, next, 3180 &clk_provider_list, node) { 3181 list_del(&clk_provider->node); 3182 kfree(clk_provider); 3183 } 3184 return; 3185 } 3186 3187 parent->clk_init_cb = match->data; 3188 parent->np = np; 3189 list_add_tail(&parent->node, &clk_provider_list); 3190 } 3191 3192 while (!list_empty(&clk_provider_list)) { 3193 is_init_done = false; 3194 list_for_each_entry_safe(clk_provider, next, 3195 &clk_provider_list, node) { 3196 if (force || parent_ready(clk_provider->np)) { 3197 3198 clk_provider->clk_init_cb(clk_provider->np); 3199 of_clk_set_defaults(clk_provider->np, true); 3200 3201 list_del(&clk_provider->node); 3202 kfree(clk_provider); 3203 is_init_done = true; 3204 } 3205 } 3206 3207 /* 3208 * We didn't manage to initialize any of the 3209 * remaining providers during the last loop, so now we 3210 * initialize all the remaining ones unconditionally 3211 * in case the clock parent was not mandatory 3212 */ 3213 if (!is_init_done) 3214 force = true; 3215 } 3216 } 3217 #endif 3218