/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt)
		return;
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

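/*
 * The owner/refcnt scheme above makes both locks reentrant for a single
 * task.  A hedged illustration (the callback and the "my_aux_clk" handle
 * are hypothetical, not part of this file): a .prepare op that calls back
 * into the framework runs with prepare_lock already held by this task,
 * so the nested call takes the refcount path instead of deadlocking:
 *
 *	static int my_prepare(struct clk_hw *hw)
 *	{
 *		return clk_prepare(my_aux_clk);
 *	}
 */
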
/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %-11d %-12d %-10lu %-11lu",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, clk_get_rate(c),
		   clk_get_accuracy(c));
	seq_printf(s, "\n");
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
				     int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk *c;

	seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate        accuracy\n");
	seq_printf(s, "---------------------------------------------------------------------------------\n");

	clk_prepare_lock();

	hlist_for_each_entry(c, &clk_root_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	hlist_for_each_entry(c, &clk_orphan_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}


static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	/* a comma is required here, otherwise the JSON output is malformed */
	seq_printf(s, "\"rate\": %lu,", clk_get_rate(c));
	seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
}

static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, child, level + 1);
	}

	seq_printf(s, "}");
}

static int clk_dump(struct seq_file *s, void *data)
{
	struct clk *c;
	bool first_node = true;

	seq_printf(s, "{");

	clk_prepare_lock();

	hlist_for_each_entry(c, &clk_root_list, child_node) {
		if (!first_node)
			seq_printf(s, ",");
		first_node = false;
		clk_dump_subtree(s, c, 0);
	}

	hlist_for_each_entry(c, &clk_orphan_list, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, c, 0);
	}

	clk_prepare_unlock();

	seq_printf(s, "}");
	return 0;
}


static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open		= clk_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			       (u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
			       (u32 *)&clk->accuracy);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			       (u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			       (u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			       (u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			       (u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	ret = 0;
	goto out;

err_out:
	debugfs_remove_recursive(clk->dentry);
	clk->dentry = NULL;
out:
	return ret;
}

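/*
 * For a hypothetical clock named "pll1" hanging directly off the debugfs
 * root created in clk_debug_init() below, the directory built above would
 * typically appear as (assuming debugfs is mounted in the usual place):
 *
 *	/sys/kernel/debug/clk/pll1/clk_rate
 *	/sys/kernel/debug/clk/pll1/clk_accuracy
 *	/sys/kernel/debug/clk/pll1/clk_flags
 *	/sys/kernel/debug/clk/pll1/clk_prepare_count
 *	/sys/kernel/debug/clk/pll1/clk_enable_count
 *	/sys/kernel/debug/clk/pll1/clk_notifier_count
 */
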
/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);

	if (ret)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	ret = 0;
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far), so this requirement is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs
	 */
	if (!parent)
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}

/**
 * clk_debug_unregister - remove a clk node from the debugfs clk tree
 * @clk: the clk being removed from the debugfs clk tree
 *
 * Dynamically removes a clk and all of its child clk nodes from the
 * debugfs clk tree if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_init.
 *
 * Caller must hold prepare_lock.
 */
static void clk_debug_unregister(struct clk *clk)
{
	debugfs_remove_recursive(clk->dentry);
}

/**
 * clk_debug_reparent - reparent clk node in the debugfs clk tree
 * @clk: the clk being reparented
 * @new_parent: the new clk parent, may be NULL
 *
 * Rename clk entry in the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.
 */
static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
	struct dentry *d;
	struct dentry *new_parent_d;

	if (!inited)
		return;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			   new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
			 __func__, clk->name);
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time; all clks added dynamically afterwards are
 * handled by clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct dentry *d;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);

	if (!orphandir)
		return -ENOMEM;

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	inited = 1;

	clk_prepare_unlock();

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
}
static inline void clk_debug_unregister(struct clk *clk)
{
}
#endif

/* caller must hold prepare_lock */
static void clk_unprepare_unused_subtree(struct clk *clk)
{
	struct clk *child;

	if (!clk)
		return;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (clk->prepare_count)
		return;

	if (clk->flags & CLK_IGNORE_UNUSED)
		return;

	if (__clk_is_prepared(clk)) {
		if (clk->ops->unprepare_unused)
			clk->ops->unprepare_unused(clk->hw);
		else if (clk->ops->unprepare)
			clk->ops->unprepare(clk->hw);
	}
}

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	flags = clk_enable_lock();

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  Call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (__clk_is_enabled(clk)) {
		if (clk->ops->disable_unused)
			clk->ops->disable_unused(clk->hw);
		else if (clk->ops->disable)
			clk->ops->disable(clk->hw);
	}

unlock_out:
	clk_enable_unlock(flags);

out:
	return;
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int clk_disable_unused(void)
{
	struct clk *clk;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(clk);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

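/*
 * To keep the sweep above from gating everything during board bring-up,
 * either boot with "clk_ignore_unused" on the kernel command line or opt
 * a single clock out with the CLK_IGNORE_UNUSED flag at registration
 * time.  A hedged sketch ("my_init" and "my_gate_ops" are hypothetical):
 *
 *	static struct clk_init_data my_init = {
 *		.name = "my_gate",
 *		.ops = &my_gate_ops,
 *		.flags = CLK_IGNORE_UNUSED,
 *	};
 */
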
/***        helper functions       ***/

const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->num_parents;
}
EXPORT_SYMBOL_GPL(__clk_get_num_parents);

struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}
EXPORT_SYMBOL_GPL(__clk_get_parent);

struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
	if (!clk || index >= clk->num_parents)
		return NULL;
	else if (!clk->parents)
		return __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		return clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		return clk->parents[index];
}
EXPORT_SYMBOL_GPL(clk_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->enable_count;
}

unsigned int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? 0 : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL_GPL(__clk_get_rate);

unsigned long __clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

bool __clk_is_prepared(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing
	 */
	if (!clk->ops->is_prepared) {
		ret = clk->prepare_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_prepared(clk->hw);
out:
	return !!ret;
}

bool __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return !!ret;
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

/*
 * Helper for finding the best parent to provide a given frequency.  This
 * can be used directly as a determine_rate callback (e.g. for a mux), or
 * from a more complex clock that may combine a mux with other operations.
 */
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *best_parent_rate,
			      struct clk **best_parent_p)
{
	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = clk->parent;
		if (clk->flags & CLK_SET_RATE_PARENT)
			best = __clk_round_rate(parent, rate);
		else if (parent)
			best = __clk_get_rate(parent);
		else
			best = __clk_get_rate(clk);
		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = clk->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_get_parent_by_index(clk, i);
		if (!parent)
			continue;
		if (clk->flags & CLK_SET_RATE_PARENT)
			parent_rate = __clk_round_rate(parent, rate);
		else
			parent_rate = __clk_get_rate(parent);
		if (parent_rate <= rate && parent_rate > best) {
			best_parent = parent;
			best = parent_rate;
		}
	}

out:
	if (best_parent)
		*best_parent_p = best_parent;
	*best_parent_rate = best;

	return best;
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

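/*
 * A mux driver can plug __clk_mux_determine_rate() straight into its
 * clk_ops, as the comment above suggests.  A minimal, hedged sketch (the
 * "my_mux_*" callbacks are hypothetical):
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.get_parent	= my_mux_get_parent,
 *		.set_parent	= my_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */
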
/***        clk api        ***/

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	clk_prepare_lock();
	__clk_unprepare(clk);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a
 * simple case, clk_prepare can be used instead of clk_enable to ungate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk ungate operation may require a fast and a
 * slow part.  It is for this reason that clk_prepare and clk_enable are not
 * mutually exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	clk_prepare_lock();
	ret = __clk_prepare(clk);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

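/*
 * A typical consumer pairs the two halves of the API as sketched below
 * (hedged; the "my_clk" handle is hypothetical).  clk_prepare() may
 * sleep, so it runs in process context; clk_enable() may then be called
 * from atomic context once the clk is prepared:
 *
 *	ret = clk_prepare(my_clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(my_clk);
 *	if (ret) {
 *		clk_unprepare(my_clk);
 *		return ret;
 *	}
 *
 *	...
 *
 *	clk_disable(my_clk);
 *	clk_unprepare(my_clk);
 */
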
static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(IS_ERR(clk)))
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	flags = clk_enable_lock();
	__clk_disable(clk);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = __clk_enable(clk);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;
	struct clk *parent;

	if (!clk)
		return 0;

	parent = clk->parent;
	if (parent)
		parent_rate = parent->rate;

	if (clk->ops->determine_rate)
		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
						&parent);
	else if (clk->ops->round_rate)
		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
	else if (clk->flags & CLK_SET_RATE_PARENT)
		return __clk_round_rate(clk->parent, rate);
	else
		return clk->rate;
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then clk's cached rate is returned (or, with
 * CLK_SET_RATE_PARENT, the rounded parent rate).
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	clk_prepare_lock();
	ret = __clk_round_rate(clk, rate);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

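/*
 * A consumer can use clk_round_rate() to probe what the hardware would
 * actually deliver before committing with clk_set_rate().  A hedged
 * sketch (handle and target rate are hypothetical):
 *
 *	long rounded = clk_round_rate(my_clk, 48000000);
 *
 *	if (rounded == 48000000)
 *		ret = clk_set_rate(my_clk, 48000000);
 */
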
/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			break;
		}
	}

	return ret;
}

/**
 * __clk_recalc_accuracies
 * @clk: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_accuracies(struct clk *clk)
{
	unsigned long parent_accuracy = 0;
	struct clk *child;

	if (clk->parent)
		parent_accuracy = clk->parent->accuracy;

	if (clk->ops->recalc_accuracy)
		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
							  parent_accuracy);
	else
		clk->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_accuracies(child);
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless the
 * CLK_GET_ACCURACY_NOCACHE flag is set, in which case the accuracy is
 * recalculated first via .recalc_accuracy.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(clk);

	accuracy = __clk_get_accuracy(clk);
	clk_prepare_unlock();

	return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

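/*
 * Consumer-side usage is a single call (hypothetical handle; elsewhere in
 * the clk API the accuracy figure is expressed in parts per billion):
 *
 *	long acc = clk_get_accuracy(my_clk);
 */
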
/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * __clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	clk_prepare_lock();

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = __clk_get_rate(clk);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
	int i;

	if (!clk->parents) {
		clk->parents = kcalloc(clk->num_parents,
				       sizeof(struct clk *), GFP_KERNEL);
		if (!clk->parents)
			return -ENOMEM;
	}

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents[i] == parent)
			return i;

		if (clk->parents[i])
			continue;

		if (!strcmp(clk->parent_names[i], parent->name)) {
			clk->parents[i] = __clk_lookup(parent->name);
			return i;
		}
	}

	return -EINVAL;
}

static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == clk)
			new_parent->new_child = NULL;

		hlist_add_head(&clk->child_node, &new_parent->children);
	} else {
		hlist_add_head(&clk->child_node, &clk_orphan_list);
	}

	clk->parent = new_parent;
}

static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	struct clk *old_parent = clk->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (clk->prepare_count) {
		__clk_prepare(parent);
		clk_enable(parent);
		clk_enable(clk);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(clk, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
				   struct clk *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (clk->prepare_count) {
		clk_disable(clk);
		clk_disable(old_parent);
		__clk_unprepare(old_parent);
	}

	/* update debugfs with new clk tree topology */
	clk_debug_reparent(clk, parent);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk *old_parent;

	old_parent = __clk_set_parent_before(clk, parent);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(clk, old_parent);
		clk_enable_unlock(flags);

		if (clk->prepare_count) {
			clk_disable(clk);
			clk_disable(parent);
			__clk_unprepare(parent);
		}
		return ret;
	}

	__clk_set_parent_after(clk, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike __clk_recalc_rates, __clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
			     struct clk *new_parent, u8 p_index)
{
	struct clk *child;

	clk->new_rate = new_rate;
	clk->new_parent = new_parent;
	clk->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	clk->new_child = NULL;
	if (new_parent && new_parent != clk->parent)
		new_parent->new_child = clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw,
								  new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	struct clk *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	int p_index = 0;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = clk->parent;
	if (parent)
		best_parent_rate = parent->rate;

	/* find the closest rate and parent clk/rate */
	if (clk->ops->determine_rate) {
		new_rate = clk->ops->determine_rate(clk->hw, rate,
						    &best_parent_rate,
						    &parent);
	} else if (clk->ops->round_rate) {
		new_rate = clk->ops->round_rate(clk->hw, rate,
						&best_parent_rate);
	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		clk->new_rate = clk->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, clk->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, clk->name);
			return NULL;
		}
	}

	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(clk, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk,
					     unsigned long event)
{
	struct clk *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child) {
		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk *old_parent;

	old_rate = clk->rate;

	if (clk->new_parent)
		best_parent_rate = clk->new_parent->rate;
	else if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->new_parent && clk->new_parent != clk->parent) {
		old_parent = __clk_set_parent_before(clk, clk->new_parent);

		if (clk->ops->set_rate_and_parent) {
			skip_set_rate = true;
			clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
						      best_parent_rate,
						      clk->new_parent_index);
		} else if (clk->ops->set_parent) {
			clk->ops->set_parent(clk->hw, clk->new_parent_index);
		}

		__clk_set_parent_after(clk, clk->new_parent, old_parent);
	}

	if (!skip_set_rate && clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
	else
		clk->rate = best_parent_rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child)
		clk_change_rate(clk->new_child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/* bail early if nothing to do */
	if (rate == clk_get_rate(clk))
		goto out;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_warn("%s: failed to set %s rate\n", __func__,
			fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

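/*
 * Whether clk_set_rate() may touch the parent is a per-clock decision
 * made at registration time.  A hedged sketch of opting in (the
 * "my_div_init" clk_init_data and "my_div_ops" are hypothetical):
 *
 *	static struct clk_init_data my_div_init = {
 *		.name = "my_div",
 *		.ops = &my_div_ops,
 *		.flags = CLK_SET_RATE_PARENT,
 *	};
 */
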
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	clk_prepare_lock();
	parent = __clk_get_parent(clk);
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
		     "%s: multi-parent clocks must implement .get_parent\n",
		     __func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function.
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kcalloc(clk->num_parents, sizeof(struct clk *),
				GFP_KERNEL);

	ret = clk_get_parent_by_index(clk, index);

out:
	return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
	clk_reparent(clk, new_parent);
	clk_debug_reparent(clk, new_parent);
	__clk_recalc_accuracies(clk);
	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk is in
 * prepared state, the clk will get enabled for the duration of this call.  If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent, clk_set_parent will update the
 * clk topology, the debugfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!clk)
		return 0;

	if (!clk->ops)
		return -EINVAL;

	/* verify ops for multi-parent clks */
	if ((clk->num_parents > 1) && (!clk->ops->set_parent))
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->parent == parent)
		goto out;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		p_rate = parent->rate;
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, clk->name);
			ret = p_index;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(clk, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(clk, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(clk, POST_RATE_CHANGE);
		__clk_recalc_accuracies(clk);
	}

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

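/*
 * Consumer-side reparenting is then a single call (hypothetical handles):
 *
 *	ret = clk_set_parent(my_mux_clk, my_pll2_clk);
 */
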
/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp2;

	if (!clk)
		return -EINVAL;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
			 __func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
	    !((clk->ops->round_rate || clk->ops->determine_rate) &&
	      clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
			   __func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
			   __func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_rate_and_parent &&
	    !(clk->ops->set_parent && clk->ops->set_rate)) {
		pr_warn("%s: %s must implement .set_parent & .set_rate\n",
			__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
		     "%s: invalid NULL in %s's .parent_names\n",
		     __func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents > 1 && !clk->parents) {
		clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
				       GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
			       &clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy.  For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (clk->ops->recalc_accuracy)
		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
					__clk_get_accuracy(clk->parent));
	else if (clk->parent)
		clk->accuracy = clk->parent->accuracy;
	else
		clk->accuracy = 0;

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	clk_debug_register(clk);
	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		if (orphan->num_parents && orphan->ops->get_parent) {
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(clk->name, orphan->parent_names[i]))
				__clk_reparent(orphan, clk);
			continue;
		}

		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	kref_init(&clk->ref);
out:
	clk_prepare_unlock();

	return ret;
}

/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk.  None of the
 * fields of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use
 * with very large numbers of clocks that need to be statically initialized.
 * It is a layering violation to include clk-private.h from any code which
 * implements a clock's .ops; as such any statically initialized clock data
 * MUST be in a separate C file from the logic that implements its operations.
 * Returns the struct clk on success, or an ERR_PTR()-encoded error code
 * otherwise.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = hw->clk;
	clk->name = hw->init->name;
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->parent_names = hw->init->parent_names;
	clk->num_parents = hw->init->num_parents;
	if (dev && dev->driver)
		clk->owner = dev->driver->owner;
	else
		clk->owner = NULL;

	ret = __clk_init(dev, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

static int _clk_register(struct device *dev, struct clk_hw *hw,
			 struct clk *clk)
{
	int i, ret;

	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	if (dev && dev->driver)
		clk->owner = dev->driver->owner;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->clk = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
				    GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}


	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
					       GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	ret = __clk_init(dev, clk);
	if (!ret)
		return 0;

fail_parent_names_copy:
	while (--i >= 0)
		kfree(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree(clk->name);
fail_name:
	return ret;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with
 * new clock nodes.  It returns a pointer to the newly allocated struct clk
 * which cannot be dereferenced by driver code but may be used in conjunction
 * with the rest of the clock API.  In the event of an error clk_register
 * will return an error pointer; drivers must test for an error with IS_ERR()
 * after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	ret = _clk_register(dev, hw, clk);
	if (!ret)
		return clk;

	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);

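/*
 * A provider typically fills a clk_init_data, points hw->init at it and
 * checks the result with IS_ERR().  A hedged sketch (the ops, names and
 * "my_hw" are hypothetical):
 *
 *	static struct clk_init_data init = {
 *		.name = "my_gate",
 *		.ops = &my_gate_ops,
 *		.parent_names = (const char *[]){ "my_parent" },
 *		.num_parents = 1,
 *	};
 *
 *	my_hw.init = &init;
 *	clk = clk_register(dev, &my_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */
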
/*
 * Free memory allocated for a clock.
 * Caller must hold prepare_lock.
 */
static void __clk_release(struct kref *ref)
{
	struct clk *clk = container_of(ref, struct clk, ref);
	int i = clk->num_parents;

	kfree(clk->parents);
	while (--i >= 0)
		kfree(clk->parent_names[i]);

	kfree(clk->parent_names);
	kfree(clk->name);
	kfree(clk);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until the last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	if (clk->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
		goto out;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->children)) {
		struct clk *child;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry(child, &clk->children, child_node)
			clk_set_parent(child, NULL);
	}

	clk_debug_unregister(clk);

	hlist_del_init(&clk->child_node);

	if (clk->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
					__func__, clk->name);

	kref_put(&clk->ref, __clk_release);
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
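
/*
 * Example (illustrative sketch) of the nodrv behaviour above: a consumer
 * that obtained a clock before it was unregistered keeps a valid
 * struct clk, but operations on it now fail gracefully. foo_clk is a
 * hypothetical name for this sketch.
 *
 *	clk = clk_get(dev, "foo");
 *	...
 *	clk_unregister(foo_clk);
 *
 * From this point clk_prepare(clk) returns -ENXIO via
 * clk_nodrv_prepare_enable(), and a final clk_put(clk) drops the last
 * kref, freeing the struct clk through __clk_release().
 */
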
static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	int ret;

	clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	ret = _clk_register(dev, hw, clk);
	if (!ret) {
		devres_add(dev, clk);
	} else {
		devres_free(clk);
		clk = ERR_PTR(ret);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;

	if (WARN_ON(!c))
		return 0;
	return c == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering @clk
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
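
/*
 * Example (illustrative sketch): the devm_ variant removes the need for
 * an explicit clk_unregister() in the driver's remove path. foo_probe
 * and foo->hw are hypothetical names assumed for this sketch.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *
 *		...
 *		clk = devm_clk_register(&pdev->dev, &foo->hw);
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *		...
 *	}
 *
 * On driver detach the devres core invokes devm_clk_release(), which
 * calls clk_unregister() on the clock.
 */
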
/*
 * clkdev helpers
 */
int __clk_get(struct clk *clk)
{
	if (clk) {
		if (!try_module_get(clk->owner))
			return 0;

		kref_get(&clk->ref);
	}
	return 1;
}

void __clk_put(struct clk *clk)
{
	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();
	kref_put(&clk->ref, __clk_release);
	clk_prepare_unlock();

	module_put(clk->owner);
}

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes. This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon. The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would result in nested locking of the prepare_lock mutex.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate. The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Requests no further notification for changes to 'clk' and frees the
 * memory allocated in clk_notifier_register().
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}
	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
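
/*
 * Example (illustrative sketch): a consumer reacting to rate changes on
 * a clock. foo_clk_notifier_cb and foo_clk are hypothetical names
 * assumed for this sketch.
 *
 *	static int foo_clk_notifier_cb(struct notifier_block *nb,
 *				       unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("rate changing: %lu -> %lu\n",
 *				 cnd->old_rate, cnd->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_clk_nb = {
 *		.notifier_call = foo_clk_notifier_cb,
 *	};
 *
 *	ret = clk_notifier_register(foo_clk, &foo_clk_nb);
 */
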
#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback. Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

/* of_clk_provider list locking helpers */
void of_clk_lock(void)
{
	mutex_lock(&of_clk_mutex);
}

void of_clk_unlock(void)
{
	mutex_unlock(&of_clk_mutex);
}

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %d\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-ENOENT);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		if (!IS_ERR(clk))
			break;
	}

	return clk;
}

struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct clk *clk;

	mutex_lock(&of_clk_mutex);
	clk = __of_clk_get_from_provider(clkspec);
	mutex_unlock(&of_clk_mutex);

	return clk;
}
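
/*
 * Example (illustrative sketch): a provider exposing several clocks
 * through of_clk_src_onecell_get(). foo_clks, foo_clk_data and
 * FOO_NR_CLKS are hypothetical names assumed for this sketch.
 *
 *	static struct clk *foo_clks[FOO_NR_CLKS];
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks		= foo_clks,
 *		.clk_num	= FOO_NR_CLKS,
 *	};
 *
 *	... register each clock into foo_clks[] ...
 *
 *	ret = of_clk_add_provider(np, of_clk_src_onecell_get,
 *				  &foo_clk_data);
 *
 * Consumers then reference individual clocks from the device tree with
 * a one-cell specifier, e.g. clocks = <&foo_clock_controller 3>;
 */
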
int of_clk_get_parent_count(struct device_node *np)
{
	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	const char *clk_name;
	int rc;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  clkspec.args_count ? clkspec.args[0] : 0,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;

	if (!matches)
		matches = &__clk_of_table;

	for_each_matching_node_and_match(np, matches, &match) {
		of_clk_init_cb_t clk_init_cb = match->data;
		clk_init_cb(np);
	}
}
#endif
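
/*
 * Example (illustrative sketch): a platform clock driver typically hooks
 * into of_clk_init() by placing a match entry into __clk_of_table with
 * the CLK_OF_DECLARE() macro. The foo_* names and the compatible string
 * are hypothetical.
 *
 *	static void __init foo_clk_init(struct device_node *np)
 *	{
 *		... register clocks, then call of_clk_add_provider() ...
 *	}
 *	CLK_OF_DECLARE(foo, "vendor,foo-clocks", foo_clk_init);
 *
 * Early platform code then calls of_clk_init(NULL) to initialize all
 * such providers found in the device tree.
 */
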